xref: /dpdk/drivers/net/hns3/hns3_ethdev_vf.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 HiSilicon Limited.
3  */
4 
5 #include <rte_alarm.h>
6 #include <ethdev_pci.h>
7 #include <rte_io.h>
8 #include <rte_vfio.h>
9 
10 #include "hns3_ethdev.h"
11 #include "hns3_common.h"
12 #include "hns3_dump.h"
13 #include "hns3_logs.h"
14 #include "hns3_rxtx.h"
15 #include "hns3_regs.h"
16 #include "hns3_intr.h"
17 #include "hns3_dcb.h"
18 #include "hns3_mp.h"
19 #include "hns3_flow.h"
20 
21 #define HNS3VF_KEEP_ALIVE_INTERVAL	2000000 /* us */
22 #define HNS3VF_SERVICE_INTERVAL		1000000 /* us */
23 
24 #define HNS3VF_RESET_WAIT_MS	20
25 #define HNS3VF_RESET_WAIT_CNT	2000
26 
27 /* Reset related Registers */
28 #define HNS3_GLOBAL_RESET_BIT		0
29 #define HNS3_CORE_RESET_BIT		1
30 #define HNS3_IMP_RESET_BIT		2
31 #define HNS3_FUN_RST_ING_B		0
32 
33 enum hns3vf_evt_cause {
34 	HNS3VF_VECTOR0_EVENT_RST,
35 	HNS3VF_VECTOR0_EVENT_MBX,
36 	HNS3VF_VECTOR0_EVENT_OTHER,
37 };
38 
39 static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
40 						    uint64_t *levels);
41 static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
42 static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
43 
44 static int hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
45 				  struct rte_ether_addr *mac_addr);
46 static int hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
47 				     struct rte_ether_addr *mac_addr);
48 static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
49 				   __rte_unused int wait_to_complete);
50 
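/*
 * Enable or disable MSI-X for the VF by a read-modify-write of the MSI-X
 * Flags word located through the device's PCI capability list. Returns
 * -ENXIO when the MSI-X capability is missing or config space access fails.
 */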
51 static int
52 hns3vf_enable_msix(const struct rte_pci_device *device, bool op)
53 {
54 	uint16_t control;
55 	off_t pos;
56 	int ret;
57 
58 	if (!rte_pci_has_capability_list(device)) {
59 		PMD_INIT_LOG(ERR, "Failed to read PCI capability list");
60 		return 0;
61 	}
62 
63 	pos = rte_pci_find_capability(device, RTE_PCI_CAP_ID_MSIX);
64 	if (pos > 0) {
65 		ret = rte_pci_read_config(device, &control, sizeof(control),
66 			pos + RTE_PCI_MSIX_FLAGS);
67 		if (ret < 0) {
68 			PMD_INIT_LOG(ERR, "Failed to read MSIX flags");
69 			return -ENXIO;
70 		}
71 
72 		if (op)
73 			control |= RTE_PCI_MSIX_FLAGS_ENABLE;
74 		else
75 			control &= ~RTE_PCI_MSIX_FLAGS_ENABLE;
76 		ret = rte_pci_write_config(device, &control, sizeof(control),
77 			pos + RTE_PCI_MSIX_FLAGS);
78 		if (ret < 0) {
79 			PMD_INIT_LOG(ERR, "failed to write MSIX flags");
80 			return -ENXIO;
81 		}
82 
83 		return 0;
84 	}
85 
86 	return -ENXIO;
87 }
88 
89 static int
90 hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
91 {
92 	/* mac address was checked by upper level interface */
93 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
94 	struct hns3_vf_to_pf_msg req;
95 	int ret;
96 
97 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST,
98 			 HNS3_MBX_MAC_VLAN_UC_ADD);
99 	memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
100 	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
101 	if (ret) {
102 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
103 				      mac_addr);
104 		hns3_err(hw, "failed to add uc mac addr(%s), ret = %d",
105 			 mac_str, ret);
106 	}
107 	return ret;
108 }
109 
110 static int
111 hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
112 {
113 	/* mac address was checked by upper level interface */
114 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
115 	struct hns3_vf_to_pf_msg req;
116 	int ret;
117 
118 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST,
119 			 HNS3_MBX_MAC_VLAN_UC_REMOVE);
120 	memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
121 	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
122 	if (ret) {
123 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
124 				       mac_addr);
125 		hns3_err(hw, "failed to remove uc mac addr(%s), ret = %d",
126 			 mac_str, ret);
127 	}
128 	return ret;
129 }
130 
131 static int
132 hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
133 			    struct rte_ether_addr *mac_addr)
134 {
135 #define HNS3_TWO_ETHER_ADDR_LEN (RTE_ETHER_ADDR_LEN * 2)
136 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
137 	struct rte_ether_addr *old_addr;
138 	uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */
139 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
140 	struct hns3_vf_to_pf_msg req;
141 	int ret;
142 
143 	/*
144 	 * It has been guaranteed that the input parameter named mac_addr is a
145 	 * valid address in the rte layer of the DPDK framework.
146 	 */
147 	old_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
148 	rte_spinlock_lock(&hw->lock);
149 	memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
150 	memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes,
151 	       RTE_ETHER_ADDR_LEN);
152 
153 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST,
154 			 HNS3_MBX_MAC_VLAN_UC_MODIFY);
155 	memcpy(req.data, addr_bytes, HNS3_TWO_ETHER_ADDR_LEN);
156 	ret = hns3vf_mbx_send(hw, &req, true, NULL, 0);
157 	if (ret) {
158 		/*
159 		 * The hns3 VF PMD depends on the hns3 PF kernel ethdev
160 		 * driver. When the user has configured a MAC address for the VF
161 		 * device by the "ip link set ..." command based on the PF device,
162 		 * the hns3 PF kernel ethdev driver does not allow the VF driver
163 		 * to request reconfiguring a different default MAC address, and
164 		 * returns -EPERM to the VF driver through the mailbox.
165 		 */
166 		if (ret == -EPERM) {
167 			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
168 					       old_addr);
169 			hns3_warn(hw, "Has permanent mac addr(%s) for vf",
170 				  mac_str);
171 		} else {
172 			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
173 					       mac_addr);
174 			hns3_err(hw, "Failed to set mac addr(%s) for vf: %d",
175 				 mac_str, ret);
176 		}
177 		rte_spinlock_unlock(&hw->lock);
178 		return ret;
179 	}
180 
181 	rte_ether_addr_copy(mac_addr,
182 			    (struct rte_ether_addr *)hw->mac.mac_addr);
183 	rte_spinlock_unlock(&hw->lock);
184 
185 	return ret;
186 }
187 
188 static int
189 hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
190 		       struct rte_ether_addr *mac_addr)
191 {
192 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
193 	struct hns3_vf_to_pf_msg req;
194 	int ret;
195 
196 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_MULTICAST,
197 			 HNS3_MBX_MAC_VLAN_MC_ADD);
198 	memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
199 	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
200 	if (ret) {
201 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
202 				      mac_addr);
203 		hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
204 			 mac_str, ret);
205 	}
206 
207 	return ret;
208 }
209 
210 static int
211 hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
212 			  struct rte_ether_addr *mac_addr)
213 {
214 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
215 	struct hns3_vf_to_pf_msg req;
216 	int ret;
217 
218 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_MULTICAST,
219 			 HNS3_MBX_MAC_VLAN_MC_REMOVE);
220 	memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
221 	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
222 	if (ret) {
223 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
224 				       mac_addr);
225 		hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
226 			 mac_str, ret);
227 	}
228 
229 	return ret;
230 }
231 
232 static int
233 hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
234 			bool en_uc_pmc, bool en_mc_pmc)
235 {
236 	struct hns3_mbx_vf_to_pf_cmd *req;
237 	struct hns3_cmd_desc desc;
238 	int ret;
239 
240 	req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
241 
242 	/*
243 	 * The hns3 VF PMD depends on the hns3 PF kernel ethdev driver,
244 	 * so there are some features for promiscuous/allmulticast mode in hns3
245 	 * VF PMD as below:
246 	 * 1. The promiscuous/allmulticast mode can be configured successfully
247 	 *    only on a trusted VF device. On a non-trusted VF device,
248 	 *    configuring promiscuous/allmulticast mode will fail.
249 	 *    The hns3 VF device can be configured as a trusted device by the
250 	 *    hns3 PF kernel ethdev driver on the host with the following command:
251 	 *      "ip link set <eth num> vf <vf id> trust on"
252 	 * 2. After the promiscuous mode is configured successfully, the hns3 VF
253 	 *    PMD can receive both ingress and outgoing traffic. This includes
254 	 *    all the ingress packets, all the packets sent from the PF and
255 	 *    other VFs on the same physical port.
256 	 * 3. Note: because of hardware constraints, the VLAN filter is enabled
257 	 *    by default and cannot be turned off on a VF device, so the VLAN
258 	 *    filter is still effective even in promiscuous mode. If upper
259 	 *    applications don't call the rte_eth_dev_vlan_filter API function to
260 	 *    set a VLAN on the VF device, the hns3 VF PMD cannot receive
261 	 *    the packets with a VLAN tag in promiscuous mode.
262 	 */
263 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
264 	req->msg.code = HNS3_MBX_SET_PROMISC_MODE;
265 	req->msg.en_bc = en_bc_pmc ? 1 : 0;
266 	req->msg.en_uc = en_uc_pmc ? 1 : 0;
267 	req->msg.en_mc = en_mc_pmc ? 1 : 0;
268 	req->msg.en_limit_promisc =
269 		hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0;
270 
271 	ret = hns3_cmd_send(hw, &desc, 1);
272 	if (ret)
273 		hns3_err(hw, "Set promisc mode fail, ret = %d", ret);
274 
275 	return ret;
276 }
277 
278 static int
279 hns3vf_dev_promiscuous_enable(struct rte_eth_dev *dev)
280 {
281 	struct hns3_adapter *hns = dev->data->dev_private;
282 	struct hns3_hw *hw = &hns->hw;
283 	int ret;
284 
285 	ret = hns3vf_set_promisc_mode(hw, true, true, true);
286 	if (ret)
287 		hns3_err(hw, "Failed to enable promiscuous mode, ret = %d",
288 			ret);
289 	return ret;
290 }
291 
292 static int
293 hns3vf_dev_promiscuous_disable(struct rte_eth_dev *dev)
294 {
295 	bool allmulti = dev->data->all_multicast ? true : false;
296 	struct hns3_adapter *hns = dev->data->dev_private;
297 	struct hns3_hw *hw = &hns->hw;
298 	int ret;
299 
300 	ret = hns3vf_set_promisc_mode(hw, true, false, allmulti);
301 	if (ret)
302 		hns3_err(hw, "Failed to disable promiscuous mode, ret = %d",
303 			ret);
304 	return ret;
305 }
306 
307 static int
308 hns3vf_dev_allmulticast_enable(struct rte_eth_dev *dev)
309 {
310 	struct hns3_adapter *hns = dev->data->dev_private;
311 	struct hns3_hw *hw = &hns->hw;
312 	int ret;
313 
314 	if (dev->data->promiscuous)
315 		return 0;
316 
317 	ret = hns3vf_set_promisc_mode(hw, true, false, true);
318 	if (ret)
319 		hns3_err(hw, "Failed to enable allmulticast mode, ret = %d",
320 			ret);
321 	return ret;
322 }
323 
324 static int
325 hns3vf_dev_allmulticast_disable(struct rte_eth_dev *dev)
326 {
327 	struct hns3_adapter *hns = dev->data->dev_private;
328 	struct hns3_hw *hw = &hns->hw;
329 	int ret;
330 
331 	if (dev->data->promiscuous)
332 		return 0;
333 
334 	ret = hns3vf_set_promisc_mode(hw, true, false, false);
335 	if (ret)
336 		hns3_err(hw, "Failed to disable allmulticast mode, ret = %d",
337 			ret);
338 	return ret;
339 }
340 
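/*
 * Re-apply the promiscuous/allmulticast setting recorded in the device data
 * after a reset; broadcast promiscuous mode is always kept on.
 */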
341 static int
342 hns3vf_restore_promisc(struct hns3_adapter *hns)
343 {
344 	struct hns3_hw *hw = &hns->hw;
345 	bool allmulti = hw->data->all_multicast ? true : false;
346 
347 	if (hw->data->promiscuous)
348 		return hns3vf_set_promisc_mode(hw, true, true, true);
349 
350 	return hns3vf_set_promisc_mode(hw, true, false, allmulti);
351 }
352 
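/*
 * Ask the PF through the mailbox to map or unmap a single TQP ring to the
 * given interrupt vector; the int_gl_index is selected from the ring type
 * (HNS3_RING_GL_RX for Rx queues, HNS3_RING_GL_TX for Tx queues).
 */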
353 static int
354 hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id,
355 			     bool mmap, enum hns3_ring_type queue_type,
356 			     uint16_t queue_id)
357 {
358 	struct hns3_vf_to_pf_msg req = {0};
359 	const char *op_str;
360 	int ret;
361 
362 	req.code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
363 		HNS3_MBX_UNMAP_RING_TO_VECTOR;
364 	req.vector_id = (uint8_t)vector_id;
365 	req.ring_num = 1;
366 
367 	if (queue_type == HNS3_RING_TYPE_RX)
368 		req.ring_param[0].int_gl_index = HNS3_RING_GL_RX;
369 	else
370 		req.ring_param[0].int_gl_index = HNS3_RING_GL_TX;
371 	req.ring_param[0].ring_type = queue_type;
372 	req.ring_param[0].tqp_index = queue_id;
373 	op_str = mmap ? "Map" : "Unmap";
374 	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
375 	if (ret)
376 		hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret = %d.",
377 			 op_str, queue_id, req.vector_id, ret);
378 
379 	return ret;
380 }
381 
382 static int
383 hns3vf_dev_configure(struct rte_eth_dev *dev)
384 {
385 	struct hns3_adapter *hns = dev->data->dev_private;
386 	struct hns3_hw *hw = &hns->hw;
387 	struct rte_eth_conf *conf = &dev->data->dev_conf;
388 	enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
389 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
390 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
391 	struct rte_eth_rss_conf rss_conf;
392 	bool gro_en;
393 	int ret;
394 
395 	hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
396 
397 	/*
398 	 * Some versions of the hardware network engine do not support
399 	 * individually enabling/disabling/resetting the Tx or Rx queues. These
400 	 * devices must enable/disable/reset Tx and Rx queues at the same time.
401 	 * When the number of Tx queues allocated by upper applications is not
402 	 * equal to the number of Rx queues, the driver needs to set up fake Tx
403 	 * or Rx queues to adjust the numbers of Tx/Rx queues; otherwise, the
404 	 * network engine can not work as usual. These fake queues are
405 	 * imperceptible and can not be used by upper applications.
406 	 */
407 	ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
408 	if (ret) {
409 		hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret);
410 		hw->cfg_max_queues = 0;
411 		return ret;
412 	}
413 
414 	hw->adapter_state = HNS3_NIC_CONFIGURING;
415 	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
416 		hns3_err(hw, "setting link speed/duplex not supported");
417 		ret = -EINVAL;
418 		goto cfg_err;
419 	}
420 
421 	/* When RSS is not configured, redirect the packet queue 0 */
422 	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
423 		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
424 		rss_conf = conf->rx_adv_conf.rss_conf;
425 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
426 		if (ret)
427 			goto cfg_err;
428 	}
429 
430 	ret = hns3vf_dev_mtu_set(dev, conf->rxmode.mtu);
431 	if (ret != 0)
432 		goto cfg_err;
433 
434 	ret = hns3vf_dev_configure_vlan(dev);
435 	if (ret)
436 		goto cfg_err;
437 
438 	/* config hardware GRO */
439 	gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
440 	ret = hns3_config_gro(hw, gro_en);
441 	if (ret)
442 		goto cfg_err;
443 
444 	hns3_init_rx_ptype_tble(dev);
445 
446 	hw->adapter_state = HNS3_NIC_CONFIGURED;
447 	return 0;
448 
449 cfg_err:
450 	hw->cfg_max_queues = 0;
451 	(void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
452 	hw->adapter_state = HNS3_NIC_INITIALIZED;
453 
454 	return ret;
455 }
456 
457 static int
458 hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu)
459 {
460 	struct hns3_vf_to_pf_msg req;
461 	int ret;
462 
463 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_MTU, 0);
464 	memcpy(req.data, &mtu, sizeof(mtu));
465 	ret = hns3vf_mbx_send(hw, &req, true, NULL, 0);
466 	if (ret)
467 		hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret);
468 
469 	return ret;
470 }
471 
472 static int
473 hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
474 {
475 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
476 	uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
477 	int ret;
478 
479 	/*
480 	 * The hns3 PF/VF devices on the same port share the hardware MTU
481 	 * configuration. Currently, the hns3 VF PMD sends a mailbox message to
482 	 * ask the hns3 PF kernel ethdev driver to finish the hardware MTU
483 	 * configuration, so there is no need to stop the port for the hns3 VF
484 	 * device, and the MTU value issued by the hns3 VF PMD must be less
485 	 * than or equal to the PF's MTU.
486 	 */
487 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
488 		hns3_err(hw, "Failed to set mtu during resetting");
489 		return -EIO;
490 	}
491 
492 	/*
493 	 * When Rx of scattered packets is off, the hns3 PMD may use the vector
494 	 * Rx process function or the simple Rx functions. If the input MTU is
495 	 * increased and the maximum length of received packets is greater than
496 	 * the length of a buffer for an Rx packet, the hardware network engine
497 	 * needs to use multiple BDs and buffers to store these packets. This
498 	 * causes problems when the vector or simple Rx functions are still
499 	 * used to receive packets. So, when Rx of scattered packets is off and
500 	 * the device is started, it is not permitted to increase the MTU such
501 	 * that the maximum length of Rx packets is greater than the Rx buffer
502 	 * length.
503 	 */
504 	if (dev->data->dev_started && !dev->data->scattered_rx &&
505 	    frame_size > hw->rx_buf_len) {
506 		hns3_err(hw, "failed to set mtu because scattered rx is off "
507 			"and the device is started");
508 		return -EOPNOTSUPP;
509 	}
510 
511 	rte_spinlock_lock(&hw->lock);
512 	ret = hns3vf_config_mtu(hw, mtu);
513 	if (ret) {
514 		rte_spinlock_unlock(&hw->lock);
515 		return ret;
516 	}
517 	rte_spinlock_unlock(&hw->lock);
518 
519 	return 0;
520 }
521 
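/*
 * Vector 0 is the misc interrupt vector shared by reset and mailbox events.
 * The helpers below acknowledge latched event sources by writing
 * HNS3_VECTOR0_CMDQ_SRC_REG and mask/unmask the vector by writing 0/1 to
 * HNS3_MISC_VECTOR_REG_BASE.
 */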
522 static void
523 hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr)
524 {
525 	hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
526 }
527 
528 static void
529 hns3vf_disable_irq0(struct hns3_hw *hw)
530 {
531 	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
532 }
533 
534 static void
535 hns3vf_enable_irq0(struct hns3_hw *hw)
536 {
537 	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
538 }
539 
540 void
541 hns3vf_clear_reset_event(struct hns3_hw *hw)
542 {
543 	uint32_t clearval;
544 	uint32_t cmdq_stat_reg;
545 
546 	cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
547 	clearval = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
548 	hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, clearval);
549 
550 	hns3vf_enable_irq0(hw);
551 }
552 
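/*
 * Decode the vector 0 event source. A pending reset takes precedence over a
 * mailbox (CMDQ RX) event: it marks HNS3_VF_RESET as pending, disables the
 * command queue and acknowledges the reset via HNS3_VF_RST_ING_BIT.
 */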
553 static enum hns3vf_evt_cause
554 hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
555 {
556 	struct hns3_hw *hw = &hns->hw;
557 	enum hns3vf_evt_cause ret;
558 	uint32_t cmdq_stat_reg;
559 	uint32_t rst_ing_reg;
560 	uint32_t val;
561 
562 	/* Fetch the events from their corresponding regs */
563 	cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
564 	if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
565 		rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
566 		hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
567 		hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
568 		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
569 		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
570 		hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
571 		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
572 		hw->reset.stats.global_cnt++;
573 		hns3_warn(hw, "Global reset detected, clear reset status");
574 
575 		ret = HNS3VF_VECTOR0_EVENT_RST;
576 		goto out;
577 	}
578 
579 	/* Check for vector0 mailbox(=CMDQ RX) event source */
580 	if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
581 		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
582 		ret = HNS3VF_VECTOR0_EVENT_MBX;
583 		goto out;
584 	}
585 
586 	val = 0;
587 	ret = HNS3VF_VECTOR0_EVENT_OTHER;
588 
589 out:
590 	*clearval = val;
591 	return ret;
592 }
593 
594 static void
595 hns3vf_interrupt_handler(void *param)
596 {
597 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
598 	struct hns3_adapter *hns = dev->data->dev_private;
599 	struct hns3_hw *hw = &hns->hw;
600 	enum hns3vf_evt_cause event_cause;
601 	uint32_t clearval;
602 
603 	/* Disable interrupt */
604 	hns3vf_disable_irq0(hw);
605 
606 	/* Read out interrupt causes */
607 	event_cause = hns3vf_check_event_cause(hns, &clearval);
608 	/* Clear interrupt causes */
609 	hns3vf_clear_event_cause(hw, clearval);
610 
611 	switch (event_cause) {
612 	case HNS3VF_VECTOR0_EVENT_RST:
613 		hns3_schedule_reset(hns);
614 		break;
615 	case HNS3VF_VECTOR0_EVENT_MBX:
616 		hns3vf_handle_mbx_msg(hw);
617 		break;
618 	default:
619 		break;
620 	}
621 
622 	/* Enable interrupt if it is not caused by reset */
623 	if (event_cause == HNS3VF_VECTOR0_EVENT_MBX ||
624 	    event_cause == HNS3VF_VECTOR0_EVENT_OTHER)
625 		hns3vf_enable_irq0(hw);
626 }
627 
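/*
 * Record whether the PF supports pushing link status change (LSC) messages.
 * The capability is latched only once: a compare-and-swap moves it from the
 * UNKNOWN state to SUPPORTED or NOT_SUPPORTED.
 */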
628 void
629 hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported)
630 {
631 	uint16_t val = supported ? HNS3_PF_PUSH_LSC_CAP_SUPPORTED :
632 				   HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
633 	uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
634 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
635 
636 	if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
637 		__atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
638 					  __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
639 }
640 
641 static void
642 hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
643 {
644 #define HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS	500
645 
646 	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
647 	int32_t remain_ms = HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS;
648 	uint16_t val = HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
649 	uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
650 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
651 	struct hns3_vf_to_pf_msg req;
652 
653 	__atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
654 			 __ATOMIC_RELEASE);
655 
656 	hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0);
657 	(void)hns3vf_mbx_send(hw, &req, false, NULL, 0);
658 
659 	while (remain_ms > 0) {
660 		rte_delay_ms(HNS3_POLL_RESPONE_MS);
661 		/*
662 		 * The probe process may run in the interrupt thread context,
663 		 * for example, when users attach a device in the secondary
664 		 * process. In that case the mailbox handling task is blocked,
665 		 * so the driver has to actively handle the HNS3_MBX_LINK_STAT_CHANGE
666 		 * mailbox from the PF driver to get this capability.
667 		 */
668 		hns3vf_handle_mbx_msg(hw);
669 		if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) !=
670 			HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
671 			break;
672 		remain_ms--;
673 	}
674 
675 	/*
676 	 * When the above loop exits, pf_push_lsc_cap can be in one of three
677 	 * states: unknown (the PF has not acknowledged), not_supported, or
678 	 * supported. Configure it as 'not_supported' if it is still 'unknown'.
679 	 */
680 	__atomic_compare_exchange(&vf->pf_push_lsc_cap, &exp, &val, 0,
681 				  __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
682 
683 	if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) ==
684 		HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
685 		hns3_info(hw, "detect PF support push link status change!");
686 	} else {
687 		/*
688 		 * The framework already set the RTE_ETH_DEV_INTR_LSC bit because
689 		 * the driver declared RTE_PCI_DRV_INTR_LSC in drv_flags, so clear
690 		 * the RTE_ETH_DEV_INTR_LSC capability here.
691 		 */
692 		dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
693 	}
694 }
695 
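/*
 * Fill in the device capabilities and specifications. Revisions earlier than
 * HIP09_A use fixed defaults, while newer revisions query the specifications
 * from firmware and enable features such as limited promiscuous mode and
 * RSS offload for IPv6-SCTP.
 */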
696 static int
697 hns3vf_get_capability(struct hns3_hw *hw)
698 {
699 	int ret;
700 
701 	if (hw->revision < PCI_REVISION_ID_HIP09_A) {
702 		hns3_set_default_dev_specifications(hw);
703 		hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
704 		hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
705 		hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
706 		hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1;
707 		hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
708 		hw->rss_info.ipv6_sctp_offload_supported = false;
709 		hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE;
710 		return 0;
711 	}
712 
713 	ret = hns3_query_dev_specifications(hw);
714 	if (ret) {
715 		PMD_INIT_LOG(ERR,
716 			     "failed to query dev specifications, ret = %d",
717 			     ret);
718 		return ret;
719 	}
720 
721 	hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
722 	hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
723 	hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
724 	hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
725 	hw->rss_info.ipv6_sctp_offload_supported = true;
726 	hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE;
727 
728 	return 0;
729 }
730 
731 static int
732 hns3vf_check_tqp_info(struct hns3_hw *hw)
733 {
734 	if (hw->tqps_num == 0) {
735 		PMD_INIT_LOG(ERR, "Get invalid tqps_num(0) from PF.");
736 		return -EINVAL;
737 	}
738 
739 	if (hw->rss_size_max == 0) {
740 		PMD_INIT_LOG(ERR, "Get invalid rss_size_max(0) from PF.");
741 		return -EINVAL;
742 	}
743 
744 	hw->tqps_num = RTE_MIN(hw->rss_size_max, hw->tqps_num);
745 
746 	return 0;
747 }
748 
749 static int
750 hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw)
751 {
752 	struct hns3_vf_to_pf_msg req;
753 	uint8_t resp_msg;
754 	int ret;
755 
756 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN,
757 			 HNS3_MBX_GET_PORT_BASE_VLAN_STATE);
758 	ret = hns3vf_mbx_send(hw, &req, true, &resp_msg, sizeof(resp_msg));
759 	if (ret) {
760 		if (ret == -ETIME) {
761 			/*
762 			 * Getting the current port based VLAN state from the PF
763 			 * driver will not affect the VF driver's basic function.
764 			 * Because the VF driver relies on the hns3 PF kernel
765 			 * ethdev driver, no failure is returned when the return
766 			 * value is ETIME, to avoid introducing compatibility
767 			 * issues with older versions of the PF driver. This
768 			 * return value covers the following scenarios:
769 			 * 1) the firmware didn't return the result in time
770 			 * 2) the result returned by the firmware timed out
771 			 * 3) an older version of the kernel side PF driver does
772 			 *    not support this mailbox message.
773 			 * For scenarios 1 and 2, it is most likely that a
774 			 * hardware error has occurred, or a hardware reset has
775 			 * occurred. In this case, these errors will be caught
776 			 * by other functions.
777 			 */
778 			PMD_INIT_LOG(WARNING,
779 				"failed to get PVID state for timeout, maybe "
780 				"kernel side PF driver doesn't support this "
781 				"mailbox message, or firmware didn't respond.");
782 			resp_msg = HNS3_PORT_BASE_VLAN_DISABLE;
783 		} else {
784 			PMD_INIT_LOG(ERR, "failed to get port based VLAN state,"
785 				" ret = %d", ret);
786 			return ret;
787 		}
788 	}
789 	hw->port_base_vlan_cfg.state = resp_msg ?
790 		HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
791 	return 0;
792 }
793 
794 static int
795 hns3vf_get_queue_info(struct hns3_hw *hw)
796 {
797 #define HNS3VF_TQPS_RSS_INFO_LEN	6
798 	uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN];
799 	struct hns3_vf_to_pf_msg req;
800 	int ret;
801 
802 	hns3vf_mbx_setup(&req, HNS3_MBX_GET_QINFO, 0);
803 	ret = hns3vf_mbx_send(hw, &req, true,
804 			      resp_msg, HNS3VF_TQPS_RSS_INFO_LEN);
805 	if (ret) {
806 		PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret);
807 		return ret;
808 	}
809 
810 	memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t));
811 	memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t));
812 
813 	return hns3vf_check_tqp_info(hw);
814 }
815 
816 static void
817 hns3vf_update_caps(struct hns3_hw *hw, uint32_t caps)
818 {
819 	if (hns3_get_bit(caps, HNS3VF_CAPS_VLAN_FLT_MOD_B))
820 		hns3_set_bit(hw->capability,
821 				HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B, 1);
822 }
823 
824 static int
825 hns3vf_get_num_tc(struct hns3_hw *hw)
826 {
827 	uint8_t num_tc = 0;
828 	uint32_t i;
829 
830 	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
831 		if (hw->hw_tc_map & BIT(i))
832 			num_tc++;
833 	}
834 	return num_tc;
835 }
836 
837 static int
838 hns3vf_get_basic_info(struct hns3_hw *hw)
839 {
840 	uint8_t resp_msg[HNS3_MBX_MAX_RESP_DATA_SIZE];
841 	struct hns3_basic_info *basic_info;
842 	struct hns3_vf_to_pf_msg req;
843 	int ret;
844 
845 	hns3vf_mbx_setup(&req, HNS3_MBX_GET_BASIC_INFO, 0);
846 	ret = hns3vf_mbx_send(hw, &req, true, resp_msg, sizeof(resp_msg));
847 	if (ret) {
848 		hns3_err(hw, "failed to get basic info from PF, ret = %d.",
849 				ret);
850 		return ret;
851 	}
852 
853 	basic_info = (struct hns3_basic_info *)resp_msg;
854 	hw->hw_tc_map = basic_info->hw_tc_map;
855 	hw->num_tc = hns3vf_get_num_tc(hw);
856 	hw->pf_vf_if_version = basic_info->pf_vf_if_version;
857 	hns3vf_update_caps(hw, basic_info->caps);
858 
859 	return 0;
860 }
861 
862 static int
863 hns3vf_get_host_mac_addr(struct hns3_hw *hw)
864 {
865 	uint8_t host_mac[RTE_ETHER_ADDR_LEN];
866 	struct hns3_vf_to_pf_msg req;
867 	int ret;
868 
869 	hns3vf_mbx_setup(&req, HNS3_MBX_GET_MAC_ADDR, 0);
870 	ret = hns3vf_mbx_send(hw, &req, true, host_mac, RTE_ETHER_ADDR_LEN);
871 	if (ret) {
872 		hns3_err(hw, "Failed to get mac addr from PF: %d", ret);
873 		return ret;
874 	}
875 
876 	memcpy(hw->mac.mac_addr, host_mac, RTE_ETHER_ADDR_LEN);
877 
878 	return 0;
879 }
880 
881 static int
882 hns3vf_get_configuration(struct hns3_hw *hw)
883 {
884 	int ret;
885 
886 	hw->mac.media_type = HNS3_MEDIA_TYPE_NONE;
887 
888 	/* Get device capability */
889 	ret = hns3vf_get_capability(hw);
890 	if (ret) {
891 		PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret);
892 		return ret;
893 	}
894 
895 	hns3vf_get_push_lsc_cap(hw);
896 
897 	/* Get basic info from PF */
898 	ret = hns3vf_get_basic_info(hw);
899 	if (ret)
900 		return ret;
901 
902 	/* Get queue configuration from PF */
903 	ret = hns3vf_get_queue_info(hw);
904 	if (ret)
905 		return ret;
906 
907 	/* Get user defined VF MAC addr from PF */
908 	ret = hns3vf_get_host_mac_addr(hw);
909 	if (ret)
910 		return ret;
911 
912 	return hns3vf_get_port_base_vlan_filter_state(hw);
913 }
914 
915 static void
916 hns3vf_request_link_info(struct hns3_hw *hw)
917 {
918 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
919 	struct hns3_vf_to_pf_msg req;
920 	bool send_req;
921 	int ret;
922 
923 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
924 		return;
925 
926 	send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
927 		   vf->req_link_info_cnt > 0;
928 	if (!send_req)
929 		return;
930 
931 	hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0);
932 	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
933 	if (ret) {
934 		hns3_err(hw, "failed to fetch link status, ret = %d", ret);
935 		return;
936 	}
937 
938 	if (vf->req_link_info_cnt > 0)
939 		vf->req_link_info_cnt--;
940 }
941 
942 void
943 hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
944 			  uint32_t link_speed, uint8_t link_duplex)
945 {
946 	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
947 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
948 	struct hns3_mac *mac = &hw->mac;
949 	int ret;
950 
951 	/*
952 	 * The PF kernel driver may push the link status while the VF driver is
953 	 * resetting; the driver stops the polling job in this case and starts
954 	 * it again after the reset is done.
955 	 * When the polling job starts, the driver gets the initial link status
956 	 * by sending a request to the PF kernel driver, and then updates it by
957 	 * processing the PF kernel driver's link status mailbox messages.
958 	 */
959 	if (!__atomic_load_n(&vf->poll_job_started, __ATOMIC_RELAXED))
960 		return;
961 
962 	if (hw->adapter_state != HNS3_NIC_STARTED)
963 		return;
964 
965 	mac->link_status = link_status;
966 	mac->link_speed = link_speed;
967 	mac->link_duplex = link_duplex;
968 	ret = hns3vf_dev_link_update(dev, 0);
969 	if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0)
970 		hns3_start_report_lse(dev);
971 }
972 
973 static int
974 hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
975 {
976 	struct hns3_mbx_vlan_filter *vlan_filter;
977 	struct hns3_vf_to_pf_msg req = {0};
978 	struct hns3_hw *hw = &hns->hw;
979 
980 	req.code = HNS3_MBX_SET_VLAN;
981 	req.subcode = HNS3_MBX_VLAN_FILTER;
982 	vlan_filter = (struct hns3_mbx_vlan_filter *)req.data;
983 	vlan_filter->is_kill = on ? 0 : 1;
984 	vlan_filter->proto = rte_cpu_to_le_16(RTE_ETHER_TYPE_VLAN);
985 	vlan_filter->vlan_id = rte_cpu_to_le_16(vlan_id);
986 
987 	return hns3vf_mbx_send(hw, &req, true, NULL, 0);
988 }
989 
990 static int
991 hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
992 {
993 	struct hns3_adapter *hns = dev->data->dev_private;
994 	struct hns3_hw *hw = &hns->hw;
995 	int ret;
996 
997 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
998 		hns3_err(hw,
999 			 "vf set vlan id failed during resetting, vlan_id =%u",
1000 			 vlan_id);
1001 		return -EIO;
1002 	}
1003 	rte_spinlock_lock(&hw->lock);
1004 	ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1005 	rte_spinlock_unlock(&hw->lock);
1006 	if (ret)
1007 		hns3_err(hw, "vf set vlan id failed, vlan_id =%u, ret =%d",
1008 			 vlan_id, ret);
1009 
1010 	return ret;
1011 }
1012 
1013 static int
1014 hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable)
1015 {
1016 	struct hns3_vf_to_pf_msg req;
1017 	uint8_t msg_data;
1018 	int ret;
1019 
1020 	if (!hns3_dev_get_support(hw, VF_VLAN_FLT_MOD))
1021 		return 0;
1022 
1023 	msg_data = enable ? 1 : 0;
1024 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN,
1025 			 HNS3_MBX_ENABLE_VLAN_FILTER);
1026 	memcpy(req.data, &msg_data, sizeof(msg_data));
1027 	ret = hns3vf_mbx_send(hw, &req, true, NULL, 0);
1028 	if (ret)
1029 		hns3_err(hw, "%s vlan filter failed, ret = %d.",
1030 				enable ? "enable" : "disable", ret);
1031 
1032 	return ret;
1033 }
1034 
1035 static int
1036 hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable)
1037 {
1038 	struct hns3_vf_to_pf_msg req;
1039 	uint8_t msg_data;
1040 	int ret;
1041 
1042 	msg_data = enable ? 1 : 0;
1043 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN,
1044 			 HNS3_MBX_VLAN_RX_OFF_CFG);
1045 	memcpy(req.data, &msg_data, sizeof(msg_data));
1046 	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
1047 	if (ret)
1048 		hns3_err(hw, "vf %s strip failed, ret = %d.",
1049 				enable ? "enable" : "disable", ret);
1050 
1051 	return ret;
1052 }
1053 
1054 static int
1055 hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1056 {
1057 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1058 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1059 	unsigned int tmp_mask;
1060 	int ret = 0;
1061 
1062 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED)) {
1063 		hns3_err(hw, "vf set vlan offload failed during resetting, mask = 0x%x",
1064 			 mask);
1065 		return -EIO;
1066 	}
1067 
1068 	tmp_mask = (unsigned int)mask;
1069 
1070 	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
1071 		rte_spinlock_lock(&hw->lock);
1072 		/* Enable or disable VLAN filter */
1073 		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
1074 			ret = hns3vf_en_vlan_filter(hw, true);
1075 		else
1076 			ret = hns3vf_en_vlan_filter(hw, false);
1077 		rte_spinlock_unlock(&hw->lock);
1078 		if (ret)
1079 			return ret;
1080 	}
1081 
1082 	/* Vlan stripping setting */
1083 	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
1084 		rte_spinlock_lock(&hw->lock);
1085 		/* Enable or disable VLAN stripping */
1086 		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1087 			ret = hns3vf_en_hw_strip_rxvtag(hw, true);
1088 		else
1089 			ret = hns3vf_en_hw_strip_rxvtag(hw, false);
1090 		rte_spinlock_unlock(&hw->lock);
1091 	}
1092 
1093 	return ret;
1094 }
1095 
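/*
 * Walk the VLAN filter bitmap saved in the ethdev data (vlan_filter_conf)
 * and add or remove a hardware filter entry for every VLAN ID whose bit is
 * set, depending on 'on'. Used to restore or flush the whole VLAN table.
 */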
1096 static int
1097 hns3vf_handle_all_vlan_table(struct hns3_adapter *hns, int on)
1098 {
1099 	struct rte_vlan_filter_conf *vfc;
1100 	struct hns3_hw *hw = &hns->hw;
1101 	uint16_t vlan_id;
1102 	uint64_t vbit;
1103 	uint64_t ids;
1104 	int ret = 0;
1105 	uint32_t i;
1106 
1107 	vfc = &hw->data->vlan_filter_conf;
1108 	for (i = 0; i < RTE_DIM(vfc->ids); i++) {
1109 		if (vfc->ids[i] == 0)
1110 			continue;
1111 		ids = vfc->ids[i];
1112 		while (ids) {
1113 			/*
1114 			 * 64 means the num bits of ids, one bit corresponds to
1115 			 * one vlan id
1116 			 */
1117 			vlan_id = 64 * i;
1118 			/* count trailing zeroes */
1119 			vbit = ~ids & (ids - 1);
1120 			/* clear least significant bit set */
1121 			ids ^= (ids ^ (ids - 1)) ^ vbit;
1122 			for (; vbit;) {
1123 				vbit >>= 1;
1124 				vlan_id++;
1125 			}
1126 			ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1127 			if (ret) {
1128 				hns3_err(hw,
1129 					 "VF handle vlan table failed, ret =%d, on = %d",
1130 					 ret, on);
1131 				return ret;
1132 			}
1133 		}
1134 	}
1135 
1136 	return ret;
1137 }
1138 
1139 static int
1140 hns3vf_remove_all_vlan_table(struct hns3_adapter *hns)
1141 {
1142 	return hns3vf_handle_all_vlan_table(hns, 0);
1143 }
1144 
1145 static int
1146 hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
1147 {
1148 	struct hns3_hw *hw = &hns->hw;
1149 	struct rte_eth_conf *dev_conf;
1150 	bool en;
1151 	int ret;
1152 
1153 	dev_conf = &hw->data->dev_conf;
1154 	en = dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? true
1155 								   : false;
1156 	ret = hns3vf_en_hw_strip_rxvtag(hw, en);
1157 	if (ret)
1158 		hns3_err(hw, "VF restore vlan conf fail, en =%d, ret =%d", en,
1159 			 ret);
1160 	return ret;
1161 }
1162 
1163 static int
1164 hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
1165 {
1166 	struct hns3_adapter *hns = dev->data->dev_private;
1167 	struct rte_eth_dev_data *data = dev->data;
1168 	struct hns3_hw *hw = &hns->hw;
1169 	int ret;
1170 
1171 	if (data->dev_conf.txmode.hw_vlan_reject_tagged ||
1172 	    data->dev_conf.txmode.hw_vlan_reject_untagged ||
1173 	    data->dev_conf.txmode.hw_vlan_insert_pvid) {
1174 		hns3_warn(hw, "hw_vlan_reject_tagged, hw_vlan_reject_untagged "
1175 			      "or hw_vlan_insert_pvid is not supported!");
1176 	}
1177 
1178 	/* Apply vlan offload setting */
1179 	ret = hns3vf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK |
1180 					RTE_ETH_VLAN_FILTER_MASK);
1181 	if (ret)
1182 		hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret);
1183 
1184 	return ret;
1185 }
1186 
1187 static int
1188 hns3vf_set_alive(struct hns3_hw *hw, bool alive)
1189 {
1190 	struct hns3_vf_to_pf_msg req;
1191 	uint8_t msg_data;
1192 
1193 	msg_data = alive ? 1 : 0;
1194 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_ALIVE, 0);
1195 	memcpy(req.data, &msg_data, sizeof(msg_data));
1196 	return hns3vf_mbx_send(hw, &req, false, NULL, 0);
1197 }
1198 
1199 static void
1200 hns3vf_keep_alive_handler(void *param)
1201 {
1202 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1203 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1204 	struct hns3_vf_to_pf_msg req;
1205 	struct hns3_hw *hw = &hns->hw;
1206 	int ret;
1207 
1208 	hns3vf_mbx_setup(&req, HNS3_MBX_KEEP_ALIVE, 0);
1209 	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
1210 	if (ret)
1211 		hns3_err(hw, "VF sends keeping alive cmd failed(=%d)",
1212 			 ret);
1213 
1214 	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
1215 			  eth_dev);
1216 }
1217 
1218 static void
1219 hns3vf_service_handler(void *param)
1220 {
1221 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1222 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1223 	struct hns3_hw *hw = &hns->hw;
1224 
1225 	/*
1226 	 * The query link status and reset processing are executed in the
1227 	 * interrupt thread. When the IMP reset occurs, IMP will not respond,
1228 	 * and the query operation will time out after 30ms. In the case of
1229 	 * multiple PF/VFs, each query failure timeout causes the IMP reset
1230 	 * interrupt to fail to respond within 100ms.
1231 	 * Before querying the link status, check whether there is a reset
1232 	 * pending, and if so, abandon the query.
1233 	 */
1234 	if (!hns3vf_is_reset_pending(hns)) {
1235 		hns3vf_request_link_info(hw);
1236 		hns3_update_hw_stats(hw);
1237 	} else {
1238 		hns3_warn(hw, "Cancel the query when reset is pending");
1239 	}
1240 
1241 	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
1242 			  eth_dev);
1243 }
1244 
1245 static void
1246 hns3vf_start_poll_job(struct rte_eth_dev *dev)
1247 {
1248 #define HNS3_REQUEST_LINK_INFO_REMAINS_CNT	3
1249 
1250 	struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1251 
1252 	if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
1253 		vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
1254 
1255 	__atomic_store_n(&vf->poll_job_started, 1, __ATOMIC_RELAXED);
1256 
1257 	hns3vf_service_handler(dev);
1258 }
1259 
1260 static void
1261 hns3vf_stop_poll_job(struct rte_eth_dev *dev)
1262 {
1263 	struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1264 
1265 	rte_eal_alarm_cancel(hns3vf_service_handler, dev);
1266 
1267 	__atomic_store_n(&vf->poll_job_started, 0, __ATOMIC_RELAXED);
1268 }
1269 
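/*
 * Query the number of MSI-X vectors assigned to this VF from firmware and
 * reject the configuration when fewer than HNS3_MIN_VECTOR_NUM are available.
 */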
1270 static int
1271 hns3_query_vf_resource(struct hns3_hw *hw)
1272 {
1273 	struct hns3_vf_res_cmd *req;
1274 	struct hns3_cmd_desc desc;
1275 	uint16_t num_msi;
1276 	int ret;
1277 
1278 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true);
1279 	ret = hns3_cmd_send(hw, &desc, 1);
1280 	if (ret) {
1281 		hns3_err(hw, "query vf resource failed, ret = %d", ret);
1282 		return ret;
1283 	}
1284 
1285 	req = (struct hns3_vf_res_cmd *)desc.data;
1286 	num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
1287 				 HNS3_VF_VEC_NUM_M, HNS3_VF_VEC_NUM_S);
1288 	if (num_msi < HNS3_MIN_VECTOR_NUM) {
1289 		hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)",
1290 			 num_msi, HNS3_MIN_VECTOR_NUM);
1291 		return -EINVAL;
1292 	}
1293 
1294 	hw->num_msi = num_msi;
1295 
1296 	return 0;
1297 }
1298 
1299 static int
1300 hns3vf_init_hardware(struct hns3_adapter *hns)
1301 {
1302 	struct hns3_hw *hw = &hns->hw;
1303 	uint16_t mtu = hw->data->mtu;
1304 	int ret;
1305 
1306 	ret = hns3vf_set_promisc_mode(hw, true, false, false);
1307 	if (ret)
1308 		return ret;
1309 
1310 	ret = hns3vf_config_mtu(hw, mtu);
1311 	if (ret)
1312 		goto err_init_hardware;
1313 
1314 	ret = hns3vf_vlan_filter_configure(hns, 0, 1);
1315 	if (ret) {
1316 		PMD_INIT_LOG(ERR, "Failed to initialize VLAN config: %d", ret);
1317 		goto err_init_hardware;
1318 	}
1319 
1320 	ret = hns3_config_gro(hw, false);
1321 	if (ret) {
1322 		PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
1323 		goto err_init_hardware;
1324 	}
1325 
1326 	/*
1327 	 * During initialization, all hardware mappings between queues and
1328 	 * interrupt vectors need to be cleared, so that errors caused by
1329 	 * residual configurations, such as unexpected interrupts, can be
1330 	 * avoided.
1331 	 */
1332 	ret = hns3_init_ring_with_vector(hw);
1333 	if (ret) {
1334 		PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
1335 		goto err_init_hardware;
1336 	}
1337 
1338 	return 0;
1339 
1340 err_init_hardware:
1341 	(void)hns3vf_set_promisc_mode(hw, false, false, false);
1342 	return ret;
1343 }
1344 
1345 static int
1346 hns3vf_clear_vport_list(struct hns3_hw *hw)
1347 {
1348 	struct hns3_vf_to_pf_msg req;
1349 
1350 	hns3vf_mbx_setup(&req, HNS3_MBX_HANDLE_VF_TBL,
1351 			 HNS3_MBX_VPORT_LIST_CLEAR);
1352 	return hns3vf_mbx_send(hw, &req, false, NULL, 0);
1353 }
1354 
1355 static int
1356 hns3vf_init_vf(struct rte_eth_dev *eth_dev)
1357 {
1358 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1359 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1360 	struct hns3_hw *hw = &hns->hw;
1361 	int ret;
1362 
1363 	PMD_INIT_FUNC_TRACE();
1364 
1365 	/* Get hardware io base address from pcie BAR2 IO space */
1366 	hw->io_base = pci_dev->mem_resource[2].addr;
1367 
1368 	ret = hns3_get_pci_revision_id(hw, &hw->revision);
1369 	if (ret)
1370 		return ret;
1371 
1372 	/* Firmware command queue initialize */
1373 	ret = hns3_cmd_init_queue(hw);
1374 	if (ret) {
1375 		PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
1376 		goto err_cmd_init_queue;
1377 	}
1378 
1379 	/* Firmware command initialize */
1380 	ret = hns3_cmd_init(hw);
1381 	if (ret) {
1382 		PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
1383 		goto err_cmd_init;
1384 	}
1385 
1386 	hns3_tx_push_init(eth_dev);
1387 
1388 	/* Get VF resource */
1389 	ret = hns3_query_vf_resource(hw);
1390 	if (ret)
1391 		goto err_cmd_init;
1392 
1393 	rte_spinlock_init(&hw->mbx_resp.lock);
1394 
1395 	hns3vf_clear_event_cause(hw, 0);
1396 
1397 	ret = rte_intr_callback_register(pci_dev->intr_handle,
1398 					 hns3vf_interrupt_handler, eth_dev);
1399 	if (ret) {
1400 		PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
1401 		goto err_intr_callback_register;
1402 	}
1403 
1404 	/* Enable interrupt */
1405 	rte_intr_enable(pci_dev->intr_handle);
1406 	hns3vf_enable_irq0(hw);
1407 
1408 	/* Get configuration from PF */
1409 	ret = hns3vf_get_configuration(hw);
1410 	if (ret) {
1411 		PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
1412 		goto err_get_config;
1413 	}
1414 
1415 	ret = hns3_stats_init(hw);
1416 	if (ret)
1417 		goto err_get_config;
1418 
1419 	ret = hns3_queue_to_tc_mapping(hw, hw->tqps_num, hw->tqps_num);
1420 	if (ret) {
1421 		PMD_INIT_LOG(ERR, "failed to set tc info, ret = %d.", ret);
1422 		goto err_set_tc_queue;
1423 	}
1424 
1425 	ret = hns3vf_clear_vport_list(hw);
1426 	if (ret) {
1427 		PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret);
1428 		goto err_set_tc_queue;
1429 	}
1430 
1431 	ret = hns3vf_init_hardware(hns);
1432 	if (ret)
1433 		goto err_set_tc_queue;
1434 
1435 	hns3_rss_set_default_args(hw);
1436 
1437 	ret = hns3vf_set_alive(hw, true);
1438 	if (ret) {
1439 		PMD_INIT_LOG(ERR, "Failed to VF send alive to PF: %d", ret);
1440 		goto err_set_tc_queue;
1441 	}
1442 
1443 	return 0;
1444 
1445 err_set_tc_queue:
1446 	hns3_stats_uninit(hw);
1447 
1448 err_get_config:
1449 	hns3vf_disable_irq0(hw);
1450 	rte_intr_disable(pci_dev->intr_handle);
1451 	hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler,
1452 			     eth_dev);
1453 err_intr_callback_register:
1454 err_cmd_init:
1455 	hns3_cmd_uninit(hw);
1456 	hns3_cmd_destroy_queue(hw);
1457 err_cmd_init_queue:
1458 	hw->io_base = NULL;
1459 
1460 	return ret;
1461 }
1462 
1463 static void
1464 hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
1465 {
1466 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1467 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1468 	struct hns3_hw *hw = &hns->hw;
1469 
1470 	PMD_INIT_FUNC_TRACE();
1471 
1472 	hns3_rss_uninit(hns);
1473 	(void)hns3_config_gro(hw, false);
1474 	(void)hns3vf_set_alive(hw, false);
1475 	(void)hns3vf_set_promisc_mode(hw, false, false, false);
1476 	hns3_flow_uninit(eth_dev);
1477 	hns3_stats_uninit(hw);
1478 	hns3vf_disable_irq0(hw);
1479 	rte_intr_disable(pci_dev->intr_handle);
1480 	hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler,
1481 			     eth_dev);
1482 	hns3_cmd_uninit(hw);
1483 	hns3_cmd_destroy_queue(hw);
1484 	hw->io_base = NULL;
1485 }
1486 
1487 static int
1488 hns3vf_do_stop(struct hns3_adapter *hns)
1489 {
1490 	struct hns3_hw *hw = &hns->hw;
1491 	int ret;
1492 
1493 	hw->mac.link_status = RTE_ETH_LINK_DOWN;
1494 
1495 	/*
1496 	 * The "hns3vf_do_stop" function will also be called by .stop_service to
1497 	 * prepare reset. At the time of global or IMP reset, the command cannot
1498 	 * be sent to stop the tx/rx queues. The mbuf in Tx/Rx queues may be
1499 	 * accessed during the reset process. So the mbuf can not be released
1500 	 * during reset and is required to be released after the reset is
1501 	 * completed.
1502 	 */
1503 	if (__atomic_load_n(&hw->reset.resetting,  __ATOMIC_RELAXED) == 0)
1504 		hns3_dev_release_mbufs(hns);
1505 
1506 	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0) {
1507 		hns3_configure_all_mac_addr(hns, true);
1508 		ret = hns3_reset_all_tqps(hns);
1509 		if (ret) {
1510 			hns3_err(hw, "failed to reset all queues ret = %d",
1511 				 ret);
1512 			return ret;
1513 		}
1514 	}
1515 	return 0;
1516 }
1517 
1518 static int
1519 hns3vf_dev_stop(struct rte_eth_dev *dev)
1520 {
1521 	struct hns3_adapter *hns = dev->data->dev_private;
1522 	struct hns3_hw *hw = &hns->hw;
1523 
1524 	PMD_INIT_FUNC_TRACE();
1525 	dev->data->dev_started = 0;
1526 
1527 	hw->adapter_state = HNS3_NIC_STOPPING;
1528 	hns3_stop_rxtx_datapath(dev);
1529 
1530 	rte_spinlock_lock(&hw->lock);
1531 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
1532 		hns3_stop_tqps(hw);
1533 		hns3vf_do_stop(hns);
1534 		hns3_unmap_rx_interrupt(dev);
1535 		hw->adapter_state = HNS3_NIC_CONFIGURED;
1536 	}
1537 	hns3_rx_scattered_reset(dev);
1538 	hns3vf_stop_poll_job(dev);
1539 	hns3_stop_report_lse(dev);
1540 	rte_spinlock_unlock(&hw->lock);
1541 
1542 	return 0;
1543 }
1544 
1545 static int
1546 hns3vf_dev_close(struct rte_eth_dev *eth_dev)
1547 {
1548 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1549 	struct hns3_hw *hw = &hns->hw;
1550 	int ret = 0;
1551 
1552 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1553 		hns3_mp_uninit(eth_dev);
1554 		return 0;
1555 	}
1556 
1557 	if (hw->adapter_state == HNS3_NIC_STARTED)
1558 		ret = hns3vf_dev_stop(eth_dev);
1559 
1560 	hw->adapter_state = HNS3_NIC_CLOSING;
1561 	hns3_reset_abort(hns);
1562 	hw->adapter_state = HNS3_NIC_CLOSED;
1563 	rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
1564 	hns3_configure_all_mc_mac_addr(hns, true);
1565 	hns3vf_remove_all_vlan_table(hns);
1566 	hns3vf_uninit_vf(eth_dev);
1567 	hns3_free_all_queues(eth_dev);
1568 	rte_free(hw->reset.wait_data);
1569 	hns3_mp_uninit(eth_dev);
1570 	hns3_warn(hw, "Close port %u finished", hw->data->port_id);
1571 
1572 	return ret;
1573 }
1574 
1575 static int
1576 hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
1577 		       __rte_unused int wait_to_complete)
1578 {
1579 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1580 	struct hns3_hw *hw = &hns->hw;
1581 	struct hns3_mac *mac = &hw->mac;
1582 	struct rte_eth_link new_link;
1583 
1584 	memset(&new_link, 0, sizeof(new_link));
1585 	switch (mac->link_speed) {
1586 	case RTE_ETH_SPEED_NUM_10M:
1587 	case RTE_ETH_SPEED_NUM_100M:
1588 	case RTE_ETH_SPEED_NUM_1G:
1589 	case RTE_ETH_SPEED_NUM_10G:
1590 	case RTE_ETH_SPEED_NUM_25G:
1591 	case RTE_ETH_SPEED_NUM_40G:
1592 	case RTE_ETH_SPEED_NUM_50G:
1593 	case RTE_ETH_SPEED_NUM_100G:
1594 	case RTE_ETH_SPEED_NUM_200G:
1595 		if (mac->link_status)
1596 			new_link.link_speed = mac->link_speed;
1597 		break;
1598 	default:
1599 		if (mac->link_status)
1600 			new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
1601 		break;
1602 	}
1603 
1604 	if (!mac->link_status)
1605 		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1606 
1607 	new_link.link_duplex = mac->link_duplex;
1608 	new_link.link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
1609 	new_link.link_autoneg =
1610 	    !(eth_dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED);
1611 
1612 	return rte_eth_linkstatus_set(eth_dev, &new_link);
1613 }
1614 
1615 static int
1616 hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
1617 {
1618 	struct hns3_hw *hw = &hns->hw;
1619 	uint16_t nb_rx_q = hw->data->nb_rx_queues;
1620 	uint16_t nb_tx_q = hw->data->nb_tx_queues;
1621 	int ret;
1622 
1623 	ret = hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
1624 	if (ret)
1625 		return ret;
1626 
1627 	hns3_enable_rxd_adv_layout(hw);
1628 
1629 	ret = hns3_init_queues(hns, reset_queue);
1630 	if (ret) {
1631 		hns3_err(hw, "failed to init queues, ret = %d.", ret);
1632 		return ret;
1633 	}
1634 
1635 	return hns3_restore_filter(hns);
1636 }
1637 
1638 static int
1639 hns3vf_dev_start(struct rte_eth_dev *dev)
1640 {
1641 	struct hns3_adapter *hns = dev->data->dev_private;
1642 	struct hns3_hw *hw = &hns->hw;
1643 	int ret;
1644 
1645 	PMD_INIT_FUNC_TRACE();
1646 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED))
1647 		return -EBUSY;
1648 
1649 	rte_spinlock_lock(&hw->lock);
1650 	hw->adapter_state = HNS3_NIC_STARTING;
1651 	ret = hns3vf_do_start(hns, true);
1652 	if (ret) {
1653 		hw->adapter_state = HNS3_NIC_CONFIGURED;
1654 		rte_spinlock_unlock(&hw->lock);
1655 		return ret;
1656 	}
1657 	ret = hns3_map_rx_interrupt(dev);
1658 	if (ret)
1659 		goto map_rx_inter_err;
1660 
1661 	/*
1662 	 * There are three registers used to control the status of a TQP
1663 	 * (a pair of Tx queue and Rx queue) in the new version of the network
1664 	 * engine. One controls the enabling of the Tx queue, another controls
1665 	 * the enabling of the Rx queue, and the last is the master switch that
1666 	 * controls the enabling of the TQP. The Tx register and the TQP
1667 	 * register must be enabled at the same time to enable a Tx queue.
1668 	 * The same applies to the Rx queue. For the older network engine, this
1669 	 * function only refreshes the enabled flag, and is used to update the
1670 	 * status of the queue in the DPDK framework.
1671 	 */
1672 	ret = hns3_start_all_txqs(dev);
1673 	if (ret)
1674 		goto map_rx_inter_err;
1675 
1676 	ret = hns3_start_all_rxqs(dev);
1677 	if (ret)
1678 		goto start_all_rxqs_fail;
1679 
1680 	hw->adapter_state = HNS3_NIC_STARTED;
1681 	rte_spinlock_unlock(&hw->lock);
1682 
1683 	hns3_rx_scattered_calc(dev);
1684 	hns3_start_rxtx_datapath(dev);
1685 
1686 	/* Enable interrupt of all rx queues before enabling queues */
1687 	hns3_dev_all_rx_queue_intr_enable(hw, true);
1688 	hns3_start_tqps(hw);
1689 
1690 	if (dev->data->dev_conf.intr_conf.lsc != 0)
1691 		hns3vf_dev_link_update(dev, 0);
1692 	hns3vf_start_poll_job(dev);
1693 
1694 	return ret;
1695 
1696 start_all_rxqs_fail:
1697 	hns3_stop_all_txqs(dev);
1698 map_rx_inter_err:
1699 	(void)hns3vf_do_stop(hns);
1700 	hw->adapter_state = HNS3_NIC_CONFIGURED;
1701 	rte_spinlock_unlock(&hw->lock);
1702 
1703 	return ret;
1704 }
1705 
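/*
 * Check whether the pending reset has completed by reading the "reset in
 * progress" register matching the reset level: HNS3_VF_RST_ING for a VF
 * reset, HNS3_FUN_RST_ING for the other levels.
 */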
1706 static bool
1707 is_vf_reset_done(struct hns3_hw *hw)
1708 {
1709 #define HNS3_FUN_RST_ING_BITS \
1710 	(BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \
1711 	 BIT(HNS3_VECTOR0_CORERESET_INT_B) | \
1712 	 BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \
1713 	 BIT(HNS3_VECTOR0_FUNCRESET_INT_B))
1714 
1715 	uint32_t val;
1716 
1717 	if (hw->reset.level == HNS3_VF_RESET) {
1718 		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
1719 		if (val & HNS3_VF_RST_ING_BIT)
1720 			return false;
1721 	} else {
1722 		val = hns3_read_dev(hw, HNS3_FUN_RST_ING);
1723 		if (val & HNS3_FUN_RST_ING_BITS)
1724 			return false;
1725 	}
1726 	return true;
1727 }
1728 
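/*
 * Peek at the vector 0 CMDQ status register and report HNS3_VF_RESET if the
 * reset interrupt bit is set; the bit itself is not cleared here.
 */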
1729 static enum hns3_reset_level
1730 hns3vf_detect_reset_event(struct hns3_hw *hw)
1731 {
1732 	enum hns3_reset_level reset = HNS3_NONE_RESET;
1733 	uint32_t cmdq_stat_reg;
1734 
1735 	cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
1736 	if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg)
1737 		reset = HNS3_VF_RESET;
1738 
1739 	return reset;
1740 }
1741 
1742 bool
1743 hns3vf_is_reset_pending(struct hns3_adapter *hns)
1744 {
1745 	enum hns3_reset_level last_req;
1746 	struct hns3_hw *hw = &hns->hw;
1747 	enum hns3_reset_level new_req;
1748 
1749 	/*
1750 	 * According to the protocol of PCIe, FLR to a PF device resets the PF
1751 	 * state as well as the SR-IOV extended capability including VF Enable
1752 	 * which means that VFs no longer exist.
1753 	 *
1754 	 * HNS3_VF_FULL_RESET means PF device is in FLR reset. when PF device
1755 	 * is in FLR stage, the register state of VF device is not reliable,
1756 	 * so register states detection can not be carried out. In this case,
1757 	 * we just ignore the register states and return false to indicate that
1758 	 * there are no other reset states that need to be processed by driver.
1759 	 */
1760 	if (hw->reset.level == HNS3_VF_FULL_RESET)
1761 		return false;
1762 
1763 	/*
1764 	 * Only the primary process can handle the reset event,
1765 	 * so don't check for reset events in the secondary process.
1766 	 */
1767 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1768 		return false;
1769 
1770 	new_req = hns3vf_detect_reset_event(hw);
1771 	if (new_req == HNS3_NONE_RESET)
1772 		return false;
1773 
1774 	last_req = hns3vf_get_reset_level(hw, &hw->reset.pending);
1775 	if (last_req == HNS3_NONE_RESET || last_req < new_req) {
1776 		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
1777 		hns3_schedule_delayed_reset(hns);
1778 		hns3_warn(hw, "High level reset detected, delay do reset");
1779 		return true;
1780 	}
1781 
1782 	return false;
1783 }
1784 
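/*
 * Drive the reset wait state machine. The first call arms an alarm that
 * polls is_vf_reset_done(); later calls interpret wait_data->result. Once
 * hardware is ready, a function level or FLR reset (or a retry attempt)
 * returns immediately, while other levels arm one extra delay so the PF can
 * finish its own reset processing before the VF continues.
 */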
1785 static int
1786 hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
1787 {
1788 #define HNS3_WAIT_PF_RESET_READY_TIME 5
1789 	struct hns3_hw *hw = &hns->hw;
1790 	struct hns3_wait_data *wait_data = hw->reset.wait_data;
1791 	struct timeval tv;
1792 
1793 	if (wait_data->result == HNS3_WAIT_SUCCESS) {
1794 		/*
1795 		 * After the VF reset is ready, the PF may not have completed
1796 		 * the reset processing. Sending a mailbox to the PF may fail
1797 		 * during the pf reset, so it is better to add extra delay.
1798 		 */
1799 		if (hw->reset.level == HNS3_VF_FUNC_RESET ||
1800 		    hw->reset.level == HNS3_FLR_RESET)
1801 			return 0;
1802 		/* Reset retry process, no need to add extra delay. */
1803 		if (hw->reset.attempts)
1804 			return 0;
1805 		if (wait_data->check_completion == NULL)
1806 			return 0;
1807 
1808 		wait_data->check_completion = NULL;
1809 		wait_data->interval = HNS3_WAIT_PF_RESET_READY_TIME *
1810 			MSEC_PER_SEC * USEC_PER_MSEC;
1811 		wait_data->count = 1;
1812 		wait_data->result = HNS3_WAIT_REQUEST;
1813 		rte_eal_alarm_set(wait_data->interval, hns3_wait_callback,
1814 				  wait_data);
1815 		hns3_warn(hw, "hardware is ready, delay %d sec for PF reset to complete",
1816 				HNS3_WAIT_PF_RESET_READY_TIME);
1817 		return -EAGAIN;
1818 	} else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
1819 		hns3_clock_gettime(&tv);
1820 		hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
1821 			  tv.tv_sec, tv.tv_usec);
1822 		return -ETIME;
1823 	} else if (wait_data->result == HNS3_WAIT_REQUEST)
1824 		return -EAGAIN;
1825 
1826 	wait_data->hns = hns;
1827 	wait_data->check_completion = is_vf_reset_done;
1828 	wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT *
1829 				HNS3VF_RESET_WAIT_MS + hns3_clock_gettime_ms();
1830 	wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC;
1831 	wait_data->count = HNS3VF_RESET_WAIT_CNT;
1832 	wait_data->result = HNS3_WAIT_REQUEST;
1833 	rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
1834 	return -EAGAIN;
1835 }
1836 
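/*
 * Notify the PF by mailbox that a VF function reset is requested (only for
 * HNS3_VF_FUNC_RESET) and then disable further command queue access while
 * the reset is in progress.
 */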
1837 static int
1838 hns3vf_prepare_reset(struct hns3_adapter *hns)
1839 {
1840 	struct hns3_vf_to_pf_msg req;
1841 	struct hns3_hw *hw = &hns->hw;
1842 	int ret;
1843 
1844 	if (hw->reset.level == HNS3_VF_FUNC_RESET) {
1845 		hns3vf_mbx_setup(&req, HNS3_MBX_RESET, 0);
1846 		ret = hns3vf_mbx_send(hw, &req, true, NULL, 0);
1847 		if (ret)
1848 			return ret;
1849 	}
1850 	__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
1851 
1852 	return 0;
1853 }
1854 
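/*
 * Quiesce the device before a reset: report link down, stop the polling and
 * keep-alive jobs, halt the Rx/Tx datapath, disable all queues and, while the
 * command queue is still usable, remove the multicast MAC entries that the
 * hardware cannot selectively delete during a function reset.
 */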
1855 static int
1856 hns3vf_stop_service(struct hns3_adapter *hns)
1857 {
1858 	struct hns3_hw *hw = &hns->hw;
1859 	struct rte_eth_dev *eth_dev;
1860 
1861 	eth_dev = &rte_eth_devices[hw->data->port_id];
1862 	if (hw->adapter_state == HNS3_NIC_STARTED) {
1863 		/*
1864 		 * Make sure to update the link status before hns3vf_stop_poll_job,
1865 		 * because updating the link status depends on the polling job.
1866 		 */
1867 		hns3vf_update_link_status(hw, RTE_ETH_LINK_DOWN, hw->mac.link_speed,
1868 					  hw->mac.link_duplex);
1869 		hns3vf_stop_poll_job(eth_dev);
1870 	}
1871 	hw->mac.link_status = RTE_ETH_LINK_DOWN;
1872 
1873 	hns3_stop_rxtx_datapath(eth_dev);
1874 
1875 	rte_spinlock_lock(&hw->lock);
1876 	if (hw->adapter_state == HNS3_NIC_STARTED ||
1877 	    hw->adapter_state == HNS3_NIC_STOPPING) {
1878 		hns3_enable_all_queues(hw, false);
1879 		hns3vf_do_stop(hns);
1880 		hw->reset.mbuf_deferred_free = true;
1881 	} else
1882 		hw->reset.mbuf_deferred_free = false;
1883 
1884 	rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
1885 
1886 	/*
1887 	 * It is cumbersome for hardware to pick-and-choose entries for deletion
1888 	 * from table space. Hence, for function reset, software intervention is
1889 	 * required to delete the entries.
1890 	 */
1891 	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
1892 		hns3_configure_all_mc_mac_addr(hns, true);
1893 	rte_spinlock_unlock(&hw->lock);
1894 
1895 	return 0;
1896 }
1897 
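/*
 * Resume the device after a reset: restart the Rx/Tx datapath and the
 * keep-alive alarm and, if the port was started, restart the polling job,
 * re-enable Rx queue interrupts, restore the per-queue enable state and
 * finally enable all queues again.
 */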
1898 static int
1899 hns3vf_start_service(struct hns3_adapter *hns)
1900 {
1901 	struct hns3_hw *hw = &hns->hw;
1902 	struct rte_eth_dev *eth_dev;
1903 
1904 	eth_dev = &rte_eth_devices[hw->data->port_id];
1905 	hns3_start_rxtx_datapath(eth_dev);
1906 
1907 	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
1908 			  eth_dev);
1909 
1910 	if (hw->adapter_state == HNS3_NIC_STARTED) {
1911 		hns3vf_start_poll_job(eth_dev);
1912 
1913 		/* Enable interrupt of all rx queues before enabling queues */
1914 		hns3_dev_all_rx_queue_intr_enable(hw, true);
1915 		/*
1916 		 * The enable state of each rxq and txq is not preserved across
1917 		 * the reset, so restore it before enabling all tqps.
1918 		 */
1919 		hns3_restore_tqp_enable_state(hw);
1920 		/*
1921 		 * When initialization is finished, enable the queues to
1922 		 * receive and transmit packets.
1923 		 */
1924 		hns3_enable_all_queues(hw, true);
1925 	}
1926 
1927 	return 0;
1928 }
1929 
1930 static int
1931 hns3vf_check_default_mac_change(struct hns3_hw *hw)
1932 {
1933 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1934 	struct rte_ether_addr *hw_mac;
1935 	int ret;
1936 
1937 	/*
1938 	 * The hns3 PF ethdev driver in the kernel supports setting the VF MAC
1939 	 * address on the host with the "ip link set ..." command. If the hns3
1940 	 * PF kernel ethdev driver sets the MAC address for a VF device after
1941 	 * the initialization of that VF device, the PF driver will notify the
1942 	 * VF driver to reset the VF device so that the new MAC address takes
1943 	 * effect immediately. The hns3 VF PMD should check whether the MAC
1944 	 * address has been changed by the PF kernel ethdev driver; if it has,
1945 	 * the VF driver should configure the hardware with the new MAC address
1946 	 * in the hardware configuration recovery stage of the reset process.
1947 	 */
1948 	ret = hns3vf_get_host_mac_addr(hw);
1949 	if (ret)
1950 		return ret;
1951 
1952 	hw_mac = (struct rte_ether_addr *)hw->mac.mac_addr;
1953 	ret = rte_is_zero_ether_addr(hw_mac);
1954 	if (ret) {
1955 		rte_ether_addr_copy(&hw->data->mac_addrs[0], hw_mac);
1956 	} else {
1957 		ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac);
1958 		if (!ret) {
1959 			rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]);
1960 			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1961 					      &hw->data->mac_addrs[0]);
1962 			hns3_warn(hw, "Default MAC address has been changed to:"
1963 				  " %s by the host PF kernel ethdev driver",
1964 				  mac_str);
1965 		}
1966 	}
1967 
1968 	return 0;
1969 }
1970 
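/*
 * Recover the software configuration into hardware after a reset: unicast and
 * multicast MAC addresses, promiscuous mode, VLAN configuration, the port
 * based VLAN filter state, Rx interrupt mapping and GRO, then restart the
 * data path if the port was running and tell the PF the VF is alive again.
 */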
1971 static int
1972 hns3vf_restore_conf(struct hns3_adapter *hns)
1973 {
1974 	struct hns3_hw *hw = &hns->hw;
1975 	int ret;
1976 
1977 	ret = hns3vf_check_default_mac_change(hw);
1978 	if (ret)
1979 		return ret;
1980 
1981 	ret = hns3_configure_all_mac_addr(hns, false);
1982 	if (ret)
1983 		return ret;
1984 
1985 	ret = hns3_configure_all_mc_mac_addr(hns, false);
1986 	if (ret)
1987 		goto err_mc_mac;
1988 
1989 	ret = hns3vf_restore_promisc(hns);
1990 	if (ret)
1991 		goto err_vlan_table;
1992 
1993 	ret = hns3vf_restore_vlan_conf(hns);
1994 	if (ret)
1995 		goto err_vlan_table;
1996 
1997 	ret = hns3vf_get_port_base_vlan_filter_state(hw);
1998 	if (ret)
1999 		goto err_vlan_table;
2000 
2001 	ret = hns3_restore_rx_interrupt(hw);
2002 	if (ret)
2003 		goto err_vlan_table;
2004 
2005 	ret = hns3_restore_gro_conf(hw);
2006 	if (ret)
2007 		goto err_vlan_table;
2008 
2009 	if (hw->adapter_state == HNS3_NIC_STARTED) {
2010 		ret = hns3vf_do_start(hns, false);
2011 		if (ret)
2012 			goto err_vlan_table;
2013 		hns3_info(hw, "hns3vf dev restart successful!");
2014 	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
2015 		hw->adapter_state = HNS3_NIC_CONFIGURED;
2016 
2017 	ret = hns3vf_set_alive(hw, true);
2018 	if (ret) {
2019 		hns3_err(hw, "failed to send VF alive message to PF: %d", ret);
2020 		goto err_vlan_table;
2021 	}
2022 
2023 	return 0;
2024 
2025 err_vlan_table:
2026 	hns3_configure_all_mc_mac_addr(hns, true);
2027 err_mc_mac:
2028 	hns3_configure_all_mac_addr(hns, true);
2029 	return ret;
2030 }
2031 
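/*
 * Select the highest priority reset level from the pending bitmap; a level
 * lower than the reset currently being handled is reported as
 * HNS3_NONE_RESET so that it does not interrupt the ongoing process.
 */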
2032 static enum hns3_reset_level
2033 hns3vf_get_reset_level(struct hns3_hw *hw, uint64_t *levels)
2034 {
2035 	enum hns3_reset_level reset_level;
2036 
2037 	/* return the highest priority reset level amongst all */
2038 	if (hns3_atomic_test_bit(HNS3_VF_RESET, levels))
2039 		reset_level = HNS3_VF_RESET;
2040 	else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels))
2041 		reset_level = HNS3_VF_FULL_RESET;
2042 	else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels))
2043 		reset_level = HNS3_VF_PF_FUNC_RESET;
2044 	else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels))
2045 		reset_level = HNS3_VF_FUNC_RESET;
2046 	else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
2047 		reset_level = HNS3_FLR_RESET;
2048 	else
2049 		reset_level = HNS3_NONE_RESET;
2050 
2051 	if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
2052 		return HNS3_NONE_RESET;
2053 
2054 	return reset_level;
2055 }
2056 
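/*
 * Delayed work that drives the reset state machine: if a deferred interrupt
 * is pending, it is handled first to pick up the reset source, then any
 * pending reset is processed and a warning is logged when the whole sequence
 * takes longer than HNS3_RESET_PROCESS_MS.
 */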
2057 static void
2058 hns3vf_reset_service(void *param)
2059 {
2060 	struct hns3_adapter *hns = (struct hns3_adapter *)param;
2061 	struct hns3_hw *hw = &hns->hw;
2062 	enum hns3_reset_level reset_level;
2063 	struct timeval tv_delta;
2064 	struct timeval tv_start;
2065 	struct timeval tv;
2066 	uint64_t msec;
2067 
2068 	/*
2069 	 * If the interrupt was not triggered within the delay time, it may
2070 	 * have been lost. It is necessary to handle the interrupt here to
2071 	 * recover from the error.
2072 	 */
2073 	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
2074 			    SCHEDULE_DEFERRED) {
2075 		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
2076 				 __ATOMIC_RELAXED);
2077 		hns3_err(hw, "Handling interrupts in delayed tasks");
2078 		hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
2079 		reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2080 		if (reset_level == HNS3_NONE_RESET) {
2081 			hns3_err(hw, "No reset level is set, try VF reset");
2082 			hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
2083 		}
2084 	}
2085 	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
2086 
2087 	/*
2088 	 * A hardware reset has been notified; now poll and check whether the
2089 	 * hardware has actually completed the reset sequence.
2090 	 */
2091 	reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2092 	if (reset_level != HNS3_NONE_RESET) {
2093 		hns3_clock_gettime(&tv_start);
2094 		hns3_reset_process(hns, reset_level);
2095 		hns3_clock_gettime(&tv);
2096 		timersub(&tv, &tv_start, &tv_delta);
2097 		msec = hns3_clock_calctime_ms(&tv_delta);
2098 		if (msec > HNS3_RESET_PROCESS_MS)
2099 			hns3_err(hw, "%d handle long time delta %" PRIu64
2100 				 " ms time=%ld.%.6ld",
2101 				 hw->reset.level, msec, tv.tv_sec, tv.tv_usec);
2102 	}
2103 }
2104 
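/*
 * Re-initialize the device once the hardware reset has completed: for a full
 * VF reset restore bus mastering and MSI-X, then bring the command queue back
 * up, reset all TQPs and redo the basic hardware initialization.
 */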
2105 static int
2106 hns3vf_reinit_dev(struct hns3_adapter *hns)
2107 {
2108 	struct rte_eth_dev *eth_dev = &rte_eth_devices[hns->hw.data->port_id];
2109 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2110 	struct hns3_hw *hw = &hns->hw;
2111 	int ret;
2112 
2113 	if (hw->reset.level == HNS3_VF_FULL_RESET) {
2114 		rte_intr_disable(pci_dev->intr_handle);
2115 		ret = rte_pci_set_bus_master(pci_dev, true);
2116 		if (ret < 0) {
2117 			hns3_err(hw, "failed to set pci bus master, ret = %d", ret);
2118 			return ret;
2119 		}
2120 	}
2121 
2122 	/* Firmware command queue initialization */
2123 	ret = hns3_cmd_init(hw);
2124 	if (ret) {
2125 		hns3_err(hw, "Failed to init cmd: %d", ret);
2126 		return ret;
2127 	}
2128 
2129 	if (hw->reset.level == HNS3_VF_FULL_RESET) {
2130 		/*
2131 		 * UIO enables MSI-X by writing to the PCIe configuration space,
2132 		 * while vfio_pci enables MSI-X in rte_intr_enable().
2133 		 */
2134 		if (pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO ||
2135 		    pci_dev->kdrv == RTE_PCI_KDRV_UIO_GENERIC) {
2136 			ret = hns3vf_enable_msix(pci_dev, true);
2137 			if (ret != 0) {
2138 				hns3_err(hw, "Failed to enable msix");
2139 				return ret;
2140 			}
2141 		}
2142 
2143 		rte_intr_enable(pci_dev->intr_handle);
2144 	}
2145 
2146 	ret = hns3_reset_all_tqps(hns);
2147 	if (ret) {
2148 		hns3_err(hw, "Failed to reset all queues: %d", ret);
2149 		return ret;
2150 	}
2151 
2152 	ret = hns3vf_init_hardware(hns);
2153 	if (ret) {
2154 		hns3_err(hw, "Failed to init hardware: %d", ret);
2155 		return ret;
2156 	}
2157 
2158 	return 0;
2159 }
2160 
2161 static const struct eth_dev_ops hns3vf_eth_dev_ops = {
2162 	.dev_configure      = hns3vf_dev_configure,
2163 	.dev_start          = hns3vf_dev_start,
2164 	.dev_stop           = hns3vf_dev_stop,
2165 	.dev_close          = hns3vf_dev_close,
2166 	.mtu_set            = hns3vf_dev_mtu_set,
2167 	.promiscuous_enable = hns3vf_dev_promiscuous_enable,
2168 	.promiscuous_disable = hns3vf_dev_promiscuous_disable,
2169 	.allmulticast_enable = hns3vf_dev_allmulticast_enable,
2170 	.allmulticast_disable = hns3vf_dev_allmulticast_disable,
2171 	.stats_get          = hns3_stats_get,
2172 	.stats_reset        = hns3_stats_reset,
2173 	.xstats_get         = hns3_dev_xstats_get,
2174 	.xstats_get_names   = hns3_dev_xstats_get_names,
2175 	.xstats_reset       = hns3_dev_xstats_reset,
2176 	.xstats_get_by_id   = hns3_dev_xstats_get_by_id,
2177 	.xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
2178 	.dev_infos_get      = hns3_dev_infos_get,
2179 	.fw_version_get     = hns3_fw_version_get,
2180 	.rx_queue_setup     = hns3_rx_queue_setup,
2181 	.tx_queue_setup     = hns3_tx_queue_setup,
2182 	.rx_queue_release   = hns3_dev_rx_queue_release,
2183 	.tx_queue_release   = hns3_dev_tx_queue_release,
2184 	.rx_queue_start     = hns3_dev_rx_queue_start,
2185 	.rx_queue_stop      = hns3_dev_rx_queue_stop,
2186 	.tx_queue_start     = hns3_dev_tx_queue_start,
2187 	.tx_queue_stop      = hns3_dev_tx_queue_stop,
2188 	.rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
2189 	.rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
2190 	.rxq_info_get       = hns3_rxq_info_get,
2191 	.txq_info_get       = hns3_txq_info_get,
2192 	.rx_burst_mode_get  = hns3_rx_burst_mode_get,
2193 	.tx_burst_mode_get  = hns3_tx_burst_mode_get,
2194 	.mac_addr_add       = hns3_add_mac_addr,
2195 	.mac_addr_remove    = hns3_remove_mac_addr,
2196 	.mac_addr_set       = hns3vf_set_default_mac_addr,
2197 	.set_mc_addr_list   = hns3_set_mc_mac_addr_list,
2198 	.link_update        = hns3vf_dev_link_update,
2199 	.rss_hash_update    = hns3_dev_rss_hash_update,
2200 	.rss_hash_conf_get  = hns3_dev_rss_hash_conf_get,
2201 	.reta_update        = hns3_dev_rss_reta_update,
2202 	.reta_query         = hns3_dev_rss_reta_query,
2203 	.flow_ops_get       = hns3_dev_flow_ops_get,
2204 	.vlan_filter_set    = hns3vf_vlan_filter_set,
2205 	.vlan_offload_set   = hns3vf_vlan_offload_set,
2206 	.get_reg            = hns3_get_regs,
2207 	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
2208 	.tx_done_cleanup    = hns3_tx_done_cleanup,
2209 	.eth_dev_priv_dump  = hns3_eth_dev_priv_dump,
2210 	.eth_rx_descriptor_dump = hns3_rx_descriptor_dump,
2211 	.eth_tx_descriptor_dump = hns3_tx_descriptor_dump,
2212 	.get_monitor_addr       = hns3_get_monitor_addr,
2213 };
2214 
2215 static const struct hns3_reset_ops hns3vf_reset_ops = {
2216 	.reset_service       = hns3vf_reset_service,
2217 	.stop_service        = hns3vf_stop_service,
2218 	.prepare_reset       = hns3vf_prepare_reset,
2219 	.wait_hardware_ready = hns3vf_wait_hardware_ready,
2220 	.reinit_dev          = hns3vf_reinit_dev,
2221 	.restore_conf        = hns3vf_restore_conf,
2222 	.start_service       = hns3vf_start_service,
2223 };
2224 
2225 static void
2226 hns3vf_init_hw_ops(struct hns3_hw *hw)
2227 {
2228 	hw->ops.add_mc_mac_addr = hns3vf_add_mc_mac_addr;
2229 	hw->ops.del_mc_mac_addr = hns3vf_remove_mc_mac_addr;
2230 	hw->ops.add_uc_mac_addr = hns3vf_add_uc_mac_addr;
2231 	hw->ops.del_uc_mac_addr = hns3vf_remove_uc_mac_addr;
2232 	hw->ops.bind_ring_with_vector = hns3vf_bind_ring_with_vector;
2233 }
2234 
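/*
 * ethdev init callback of the VF driver: register the dev_ops and Rx/Tx burst
 * functions, set up multi-process support, and in the primary process
 * initialize the reset framework, the VF hardware and the MAC addresses
 * before arming the keep-alive alarm. A reset that was deferred while
 * initializing is rescheduled at the end.
 */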
2235 static int
2236 hns3vf_dev_init(struct rte_eth_dev *eth_dev)
2237 {
2238 	struct hns3_adapter *hns = eth_dev->data->dev_private;
2239 	struct hns3_hw *hw = &hns->hw;
2240 	int ret;
2241 
2242 	PMD_INIT_FUNC_TRACE();
2243 
2244 	hns3_flow_init(eth_dev);
2245 
2246 	hns3_set_rxtx_function(eth_dev);
2247 	eth_dev->dev_ops = &hns3vf_eth_dev_ops;
2248 	eth_dev->rx_queue_count = hns3_rx_queue_count;
2249 	ret = hns3_mp_init(eth_dev);
2250 	if (ret)
2251 		goto err_mp_init;
2252 
2253 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2254 		hns3_tx_push_init(eth_dev);
2255 		return 0;
2256 	}
2257 
2258 	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
2259 	hns->is_vf = true;
2260 	hw->data = eth_dev->data;
2261 	hns3_parse_devargs(eth_dev);
2262 
2263 	ret = hns3_reset_init(hw);
2264 	if (ret)
2265 		goto err_init_reset;
2266 	hw->reset.ops = &hns3vf_reset_ops;
2267 
2268 	hns3vf_init_hw_ops(hw);
2269 	ret = hns3vf_init_vf(eth_dev);
2270 	if (ret) {
2271 		PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret);
2272 		goto err_init_vf;
2273 	}
2274 
2275 	ret = hns3_init_mac_addrs(eth_dev);
2276 	if (ret != 0)
2277 		goto err_init_mac_addrs;
2278 
2279 	hw->adapter_state = HNS3_NIC_INITIALIZED;
2280 
2281 	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
2282 			    SCHEDULE_PENDING) {
2283 		hns3_err(hw, "Reschedule reset service after dev_init");
2284 		hns3_schedule_reset(hns);
2285 	} else {
2286 		/* IMP will wait for the ready flag before reset */
2287 		hns3_notify_reset_ready(hw, false);
2288 	}
2289 	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
2290 			  eth_dev);
2291 	return 0;
2292 
2293 err_init_mac_addrs:
2294 	hns3vf_uninit_vf(eth_dev);
2295 
2296 err_init_vf:
2297 	rte_free(hw->reset.wait_data);
2298 
2299 err_init_reset:
2300 	hns3_mp_uninit(eth_dev);
2301 
2302 err_mp_init:
2303 	eth_dev->dev_ops = NULL;
2304 	eth_dev->rx_pkt_burst = NULL;
2305 	eth_dev->rx_descriptor_status = NULL;
2306 	eth_dev->tx_pkt_burst = NULL;
2307 	eth_dev->tx_pkt_prepare = NULL;
2308 	eth_dev->tx_descriptor_status = NULL;
2309 
2310 	return ret;
2311 }
2312 
2313 static int
2314 hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
2315 {
2316 	struct hns3_adapter *hns = eth_dev->data->dev_private;
2317 	struct hns3_hw *hw = &hns->hw;
2318 
2319 	PMD_INIT_FUNC_TRACE();
2320 
2321 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2322 		hns3_mp_uninit(eth_dev);
2323 		return 0;
2324 	}
2325 
2326 	if (hw->adapter_state < HNS3_NIC_CLOSING)
2327 		hns3vf_dev_close(eth_dev);
2328 
2329 	hw->adapter_state = HNS3_NIC_REMOVED;
2330 	return 0;
2331 }
2332 
2333 static int
2334 eth_hns3vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2335 		     struct rte_pci_device *pci_dev)
2336 {
2337 	return rte_eth_dev_pci_generic_probe(pci_dev,
2338 					     sizeof(struct hns3_adapter),
2339 					     hns3vf_dev_init);
2340 }
2341 
2342 static int
2343 eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev)
2344 {
2345 	return rte_eth_dev_pci_generic_remove(pci_dev, hns3vf_dev_uninit);
2346 }
2347 
2348 static const struct rte_pci_id pci_id_hns3vf_map[] = {
2349 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) },
2350 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) },
2351 	{ .vendor_id = 0, }, /* sentinel */
2352 };
2353 
2354 static struct rte_pci_driver rte_hns3vf_pmd = {
2355 	.id_table = pci_id_hns3vf_map,
2356 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2357 	.probe = eth_hns3vf_pci_probe,
2358 	.remove = eth_hns3vf_pci_remove,
2359 };
2360 
2361 RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd);
2362 RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map);
2363 RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci");
2364 RTE_PMD_REGISTER_PARAM_STRING(net_hns3_vf,
2365 		HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
2366 		HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
2367 		HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "
2368 		HNS3_DEVARG_MBX_TIME_LIMIT_MS "=<uint16_t> ");
2369