1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 HiSilicon Limited.
3  */
4 
5 #include <rte_alarm.h>
6 #include <ethdev_pci.h>
7 #include <rte_io.h>
8 #include <rte_vfio.h>
9 
10 #include "hns3_ethdev.h"
11 #include "hns3_common.h"
12 #include "hns3_dump.h"
13 #include "hns3_logs.h"
14 #include "hns3_rxtx.h"
15 #include "hns3_regs.h"
16 #include "hns3_intr.h"
17 #include "hns3_dcb.h"
18 #include "hns3_mp.h"
19 #include "hns3_flow.h"
20 
21 #define HNS3VF_KEEP_ALIVE_INTERVAL	2000000 /* us */
22 #define HNS3VF_SERVICE_INTERVAL		1000000 /* us */
23 
24 #define HNS3VF_RESET_WAIT_MS	20
25 #define HNS3VF_RESET_WAIT_CNT	2000
26 
27 /* Reset related Registers */
28 #define HNS3_GLOBAL_RESET_BIT		0
29 #define HNS3_CORE_RESET_BIT		1
30 #define HNS3_IMP_RESET_BIT		2
31 #define HNS3_FUN_RST_ING_B		0
32 
33 enum hns3vf_evt_cause {
34 	HNS3VF_VECTOR0_EVENT_RST,
35 	HNS3VF_VECTOR0_EVENT_MBX,
36 	HNS3VF_VECTOR0_EVENT_OTHER,
37 };
38 
39 static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
40 						    RTE_ATOMIC(uint64_t) *levels);
41 static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
42 static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
43 
44 static int hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
45 				  struct rte_ether_addr *mac_addr);
46 static int hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
47 				     struct rte_ether_addr *mac_addr);
48 static int hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
49 				   __rte_unused int wait_to_complete);
50 
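/*
 * Enable or disable MSI-X for the VF by toggling the enable bit in the
 * message control word of the PCI MSI-X capability.
 */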
51 static int
52 hns3vf_enable_msix(const struct rte_pci_device *device, bool op)
53 {
54 	uint16_t control;
55 	off_t pos;
56 	int ret;
57 
58 	if (!rte_pci_has_capability_list(device)) {
59 		PMD_INIT_LOG(ERR, "Failed to read PCI capability list");
60 		return 0;
61 	}
62 
63 	pos = rte_pci_find_capability(device, RTE_PCI_CAP_ID_MSIX);
64 	if (pos > 0) {
65 		ret = rte_pci_read_config(device, &control, sizeof(control),
66 			pos + RTE_PCI_MSIX_FLAGS);
67 		if (ret < 0) {
68 			PMD_INIT_LOG(ERR, "Failed to read MSIX flags");
69 			return -ENXIO;
70 		}
71 
72 		if (op)
73 			control |= RTE_PCI_MSIX_FLAGS_ENABLE;
74 		else
75 			control &= ~RTE_PCI_MSIX_FLAGS_ENABLE;
76 		ret = rte_pci_write_config(device, &control, sizeof(control),
77 			pos + RTE_PCI_MSIX_FLAGS);
78 		if (ret < 0) {
79 			PMD_INIT_LOG(ERR, "failed to write MSIX flags");
80 			return -ENXIO;
81 		}
82 
83 		return 0;
84 	}
85 
86 	return -ENXIO;
87 }
88 
89 static int
90 hns3vf_add_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
91 {
92 	/* mac address was checked by upper level interface */
93 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
94 	struct hns3_vf_to_pf_msg req;
95 	int ret;
96 
97 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST,
98 			 HNS3_MBX_MAC_VLAN_UC_ADD);
99 	memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
100 	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
101 	if (ret) {
102 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
103 				      mac_addr);
104 		hns3_err(hw, "failed to add uc mac addr(%s), ret = %d",
105 			 mac_str, ret);
106 	}
107 	return ret;
108 }
109 
110 static int
111 hns3vf_remove_uc_mac_addr(struct hns3_hw *hw, struct rte_ether_addr *mac_addr)
112 {
113 	/* mac address was checked by upper level interface */
114 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
115 	struct hns3_vf_to_pf_msg req;
116 	int ret;
117 
118 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST,
119 			 HNS3_MBX_MAC_VLAN_UC_REMOVE);
120 	memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
121 	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
122 	if (ret) {
123 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
124 				       mac_addr);
125 		hns3_err(hw, "failed to remove uc mac addr(%s), ret = %d",
126 			 mac_str, ret);
127 	}
128 	return ret;
129 }
130 
131 static int
132 hns3vf_set_default_mac_addr(struct rte_eth_dev *dev,
133 			    struct rte_ether_addr *mac_addr)
134 {
135 #define HNS3_TWO_ETHER_ADDR_LEN (RTE_ETHER_ADDR_LEN * 2)
136 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
137 	struct rte_ether_addr *old_addr;
138 	uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */
139 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
140 	struct hns3_vf_to_pf_msg req;
141 	int ret;
142 
143 	/*
144 	 * The input parameter mac_addr has already been validated as a proper
145 	 * address by the rte layer of the DPDK framework.
146 	 */
147 	old_addr = (struct rte_ether_addr *)hw->mac.mac_addr;
148 	rte_spinlock_lock(&hw->lock);
149 	memcpy(addr_bytes, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
150 	memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes,
151 	       RTE_ETHER_ADDR_LEN);
152 
153 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST,
154 			 HNS3_MBX_MAC_VLAN_UC_MODIFY);
155 	memcpy(req.data, addr_bytes, HNS3_TWO_ETHER_ADDR_LEN);
156 	ret = hns3vf_mbx_send(hw, &req, true, NULL, 0);
157 	if (ret) {
158 		/*
159 		 * The hns3 VF PMD depends on the hns3 PF kernel ethdev
160 		 * driver. When the user has configured a MAC address for the VF
161 		 * device by the "ip link set ..." command based on the PF device,
162 		 * the hns3 PF kernel ethdev driver does not allow the VF driver
163 		 * to reconfigure a different default MAC address, and returns
164 		 * -EPERM to the VF driver through the mailbox.
165 		 */
166 		if (ret == -EPERM) {
167 			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
168 					       old_addr);
169 			hns3_warn(hw, "Has permanent mac addr(%s) for vf",
170 				  mac_str);
171 		} else {
172 			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
173 					       mac_addr);
174 			hns3_err(hw, "Failed to set mac addr(%s) for vf: %d",
175 				 mac_str, ret);
176 		}
177 		rte_spinlock_unlock(&hw->lock);
178 		return ret;
179 	}
180 
181 	rte_ether_addr_copy(mac_addr,
182 			    (struct rte_ether_addr *)hw->mac.mac_addr);
183 	rte_spinlock_unlock(&hw->lock);
184 
185 	return ret;
186 }
187 
188 static int
189 hns3vf_add_mc_mac_addr(struct hns3_hw *hw,
190 		       struct rte_ether_addr *mac_addr)
191 {
192 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
193 	struct hns3_vf_to_pf_msg req;
194 	int ret;
195 
196 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_MULTICAST,
197 			 HNS3_MBX_MAC_VLAN_MC_ADD);
198 	memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
199 	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
200 	if (ret) {
201 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
202 				      mac_addr);
203 		hns3_err(hw, "Failed to add mc mac addr(%s) for vf: %d",
204 			 mac_str, ret);
205 	}
206 
207 	return ret;
208 }
209 
210 static int
211 hns3vf_remove_mc_mac_addr(struct hns3_hw *hw,
212 			  struct rte_ether_addr *mac_addr)
213 {
214 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
215 	struct hns3_vf_to_pf_msg req;
216 	int ret;
217 
218 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_MULTICAST,
219 			 HNS3_MBX_MAC_VLAN_MC_REMOVE);
220 	memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN);
221 	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
222 	if (ret) {
223 		hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
224 				       mac_addr);
225 		hns3_err(hw, "Failed to remove mc mac addr(%s) for vf: %d",
226 			 mac_str, ret);
227 	}
228 
229 	return ret;
230 }
231 
232 static int
233 hns3vf_set_promisc_mode(struct hns3_hw *hw, bool en_bc_pmc,
234 			bool en_uc_pmc, bool en_mc_pmc)
235 {
236 	struct hns3_mbx_vf_to_pf_cmd *req;
237 	struct hns3_cmd_desc desc;
238 	int ret;
239 
240 	req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data;
241 
242 	/*
243 	 * The hns3 VF PMD depends on the hns3 PF kernel ethdev driver,
244 	 * so there are some features for promiscuous/allmulticast mode in hns3
245 	 * VF PMD as below:
246 	 * 1. The promiscuous/allmulticast mode can be configured successfully
247 	 *    only on a trusted VF device. On a non-trusted VF device,
248 	 *    configuring promiscuous/allmulticast mode will fail.
249 	 *    The hns3 VF device can be configured as a trusted device by the
250 	 *    hns3 PF kernel ethdev driver on the host with the command:
251 	 *      "ip link set <eth num> vf <vf id> trust on"
252 	 * 2. After promiscuous mode is configured successfully, the hns3 VF PMD
253 	 *    can receive both incoming and outgoing traffic. This includes
254 	 *    all the ingress packets and all the packets sent from the PF and
255 	 *    other VFs on the same physical port.
256 	 * 3. Note: because of hardware constraints, the VLAN filter is enabled
257 	 *    by default and cannot be turned off on a VF device, so the VLAN
258 	 *    filter is still effective even in promiscuous mode. If upper
259 	 *    applications don't call the rte_eth_dev_vlan_filter API to
260 	 *    add VLANs on the VF device, the hns3 VF PMD cannot receive
261 	 *    VLAN-tagged packets in promiscuous mode.
262 	 */
263 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
264 	req->msg.code = HNS3_MBX_SET_PROMISC_MODE;
265 	req->msg.en_bc = en_bc_pmc ? 1 : 0;
266 	req->msg.en_uc = en_uc_pmc ? 1 : 0;
267 	req->msg.en_mc = en_mc_pmc ? 1 : 0;
268 	req->msg.en_limit_promisc =
269 		hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0;
270 
271 	ret = hns3_cmd_send(hw, &desc, 1);
272 	if (ret)
273 		hns3_err(hw, "Set promisc mode fail, ret = %d", ret);
274 
275 	return ret;
276 }
277 
278 static int
279 hns3vf_dev_promiscuous_enable(struct rte_eth_dev *dev)
280 {
281 	struct hns3_adapter *hns = dev->data->dev_private;
282 	struct hns3_hw *hw = &hns->hw;
283 	int ret;
284 
285 	ret = hns3vf_set_promisc_mode(hw, true, true, true);
286 	if (ret)
287 		hns3_err(hw, "Failed to enable promiscuous mode, ret = %d",
288 			ret);
289 	return ret;
290 }
291 
292 static int
293 hns3vf_dev_promiscuous_disable(struct rte_eth_dev *dev)
294 {
295 	bool allmulti = dev->data->all_multicast ? true : false;
296 	struct hns3_adapter *hns = dev->data->dev_private;
297 	struct hns3_hw *hw = &hns->hw;
298 	int ret;
299 
300 	ret = hns3vf_set_promisc_mode(hw, true, false, allmulti);
301 	if (ret)
302 		hns3_err(hw, "Failed to disable promiscuous mode, ret = %d",
303 			ret);
304 	return ret;
305 }
306 
307 static int
308 hns3vf_dev_allmulticast_enable(struct rte_eth_dev *dev)
309 {
310 	struct hns3_adapter *hns = dev->data->dev_private;
311 	struct hns3_hw *hw = &hns->hw;
312 	int ret;
313 
314 	if (dev->data->promiscuous)
315 		return 0;
316 
317 	ret = hns3vf_set_promisc_mode(hw, true, false, true);
318 	if (ret)
319 		hns3_err(hw, "Failed to enable allmulticast mode, ret = %d",
320 			ret);
321 	return ret;
322 }
323 
324 static int
325 hns3vf_dev_allmulticast_disable(struct rte_eth_dev *dev)
326 {
327 	struct hns3_adapter *hns = dev->data->dev_private;
328 	struct hns3_hw *hw = &hns->hw;
329 	int ret;
330 
331 	if (dev->data->promiscuous)
332 		return 0;
333 
334 	ret = hns3vf_set_promisc_mode(hw, true, false, false);
335 	if (ret)
336 		hns3_err(hw, "Failed to disable allmulticast mode, ret = %d",
337 			ret);
338 	return ret;
339 }
340 
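/*
 * Re-apply the promiscuous/allmulticast state recorded in the ethdev data,
 * e.g. after a reset; broadcast reception is always kept enabled.
 */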
341 static int
342 hns3vf_restore_promisc(struct hns3_adapter *hns)
343 {
344 	struct hns3_hw *hw = &hns->hw;
345 	bool allmulti = hw->data->all_multicast ? true : false;
346 
347 	if (hw->data->promiscuous)
348 		return hns3vf_set_promisc_mode(hw, true, true, true);
349 
350 	return hns3vf_set_promisc_mode(hw, true, false, allmulti);
351 }
352 
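/*
 * Map or unmap a single TQP ring (Tx or Rx) to/from the given interrupt
 * vector by sending a mailbox request to the PF driver.
 */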
353 static int
354 hns3vf_bind_ring_with_vector(struct hns3_hw *hw, uint16_t vector_id,
355 			     bool mmap, enum hns3_ring_type queue_type,
356 			     uint16_t queue_id)
357 {
358 	struct hns3_vf_to_pf_msg req = {0};
359 	const char *op_str;
360 	int ret;
361 
362 	req.code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
363 		HNS3_MBX_UNMAP_RING_TO_VECTOR;
364 	req.vector_id = (uint8_t)vector_id;
365 	req.ring_num = 1;
366 
367 	if (queue_type == HNS3_RING_TYPE_RX)
368 		req.ring_param[0].int_gl_index = HNS3_RING_GL_RX;
369 	else
370 		req.ring_param[0].int_gl_index = HNS3_RING_GL_TX;
371 	req.ring_param[0].ring_type = queue_type;
372 	req.ring_param[0].tqp_index = queue_id;
373 	op_str = mmap ? "Map" : "Unmap";
374 	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
375 	if (ret)
376 		hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret = %d.",
377 			 op_str, queue_id, req.vector_id, ret);
378 
379 	return ret;
380 }
381 
382 static int
383 hns3vf_dev_configure(struct rte_eth_dev *dev)
384 {
385 	struct hns3_adapter *hns = dev->data->dev_private;
386 	struct hns3_hw *hw = &hns->hw;
387 	struct rte_eth_conf *conf = &dev->data->dev_conf;
388 	enum rte_eth_rx_mq_mode mq_mode = conf->rxmode.mq_mode;
389 	uint16_t nb_rx_q = dev->data->nb_rx_queues;
390 	uint16_t nb_tx_q = dev->data->nb_tx_queues;
391 	struct rte_eth_rss_conf rss_conf;
392 	bool gro_en;
393 	int ret;
394 
395 	hw->cfg_max_queues = RTE_MAX(nb_rx_q, nb_tx_q);
396 
397 	/*
398 	 * Some versions of the hardware network engine do not support
399 	 * individually enabling/disabling/resetting the Tx or Rx queue. These
400 	 * devices must enable/disable/reset Tx and Rx queues at the same time.
401 	 * When the number of Tx queues allocated by upper applications is not
402 	 * equal to the number of Rx queues, the driver needs to set up fake Tx
403 	 * or Rx queues to adjust the number of Tx/Rx queues; otherwise, the
404 	 * network engine cannot work as usual. These fake queues are invisible
405 	 * to and cannot be used by upper applications.
406 	 */
407 	ret = hns3_set_fake_rx_or_tx_queues(dev, nb_rx_q, nb_tx_q);
408 	if (ret) {
409 		hns3_err(hw, "fail to set Rx/Tx fake queues, ret = %d.", ret);
410 		hw->cfg_max_queues = 0;
411 		return ret;
412 	}
413 
414 	hw->adapter_state = HNS3_NIC_CONFIGURING;
415 	if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
416 		hns3_err(hw, "setting link speed/duplex not supported");
417 		ret = -EINVAL;
418 		goto cfg_err;
419 	}
420 
421 	/* When RSS is not configured, packets are directed to queue 0 */
422 	if ((uint32_t)mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
423 		conf->rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
424 		rss_conf = conf->rx_adv_conf.rss_conf;
425 		ret = hns3_dev_rss_hash_update(dev, &rss_conf);
426 		if (ret)
427 			goto cfg_err;
428 	}
429 
430 	ret = hns3vf_dev_mtu_set(dev, conf->rxmode.mtu);
431 	if (ret != 0)
432 		goto cfg_err;
433 
434 	ret = hns3vf_dev_configure_vlan(dev);
435 	if (ret)
436 		goto cfg_err;
437 
438 	/* config hardware GRO */
439 	gro_en = conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO ? true : false;
440 	ret = hns3_config_gro(hw, gro_en);
441 	if (ret)
442 		goto cfg_err;
443 
444 	hns3_init_rx_ptype_tble(dev);
445 
446 	hw->adapter_state = HNS3_NIC_CONFIGURED;
447 	return 0;
448 
449 cfg_err:
450 	hw->cfg_max_queues = 0;
451 	(void)hns3_set_fake_rx_or_tx_queues(dev, 0, 0);
452 	hw->adapter_state = HNS3_NIC_INITIALIZED;
453 
454 	return ret;
455 }
456 
457 static int
458 hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu)
459 {
460 	struct hns3_vf_to_pf_msg req;
461 	int ret;
462 
463 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_MTU, 0);
464 	memcpy(req.data, &mtu, sizeof(mtu));
465 	ret = hns3vf_mbx_send(hw, &req, true, NULL, 0);
466 	if (ret)
467 		hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret);
468 
469 	return ret;
470 }
471 
472 static int
473 hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
474 {
475 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
476 	uint32_t frame_size = mtu + HNS3_ETH_OVERHEAD;
477 	int ret;
478 
479 	/*
480 	 * The hns3 PF/VF devices on the same port share the hardware MTU
481 	 * configuration. Currently, we send mailbox to inform hns3 PF kernel
482 	 * ethdev driver to finish hardware MTU configuration in hns3 VF PMD,
483 	 * there is no need to stop the port for hns3 VF device, and the
484 	 * MTU value issued by hns3 VF PMD must be less than or equal to
485 	 * PF's MTU.
486 	 */
487 	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
488 		hns3_err(hw, "Failed to set mtu during resetting");
489 		return -EIO;
490 	}
491 
492 	/*
493 	 * When Rx of scattered packets is off, the hns3 PMD may use the vector
494 	 * Rx process function or the simple Rx functions.
495 	 * If the input MTU is increased and the maximum length of
496 	 * received packets is greater than the length of an Rx buffer,
497 	 * the hardware network engine needs to use multiple BDs and
498 	 * buffers to store these packets. This causes problems when still
499 	 * using the vector Rx process function or the simple Rx function to
500 	 * receive packets. So, when Rx of scattered packets is off and the
501 	 * device is started, it is not permitted to increase the MTU such that
502 	 * the maximum length of Rx packets exceeds the Rx buffer length.
503 	 */
504 	if (dev->data->dev_started && !dev->data->scattered_rx &&
505 	    frame_size > hw->rx_buf_len) {
506 		hns3_err(hw, "failed to set mtu because current is "
507 			"not scattered rx mode");
508 		return -EOPNOTSUPP;
509 	}
510 
511 	rte_spinlock_lock(&hw->lock);
512 	ret = hns3vf_config_mtu(hw, mtu);
513 	if (ret) {
514 		rte_spinlock_unlock(&hw->lock);
515 		return ret;
516 	}
517 	rte_spinlock_unlock(&hw->lock);
518 
519 	return 0;
520 }
521 
522 static void
523 hns3vf_clear_event_cause(struct hns3_hw *hw, uint32_t regclr)
524 {
525 	hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, regclr);
526 }
527 
528 static void
529 hns3vf_disable_irq0(struct hns3_hw *hw)
530 {
531 	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 0);
532 }
533 
534 static void
535 hns3vf_enable_irq0(struct hns3_hw *hw)
536 {
537 	hns3_write_dev(hw, HNS3_MISC_VECTOR_REG_BASE, 1);
538 }
539 
540 void
541 hns3vf_clear_reset_event(struct hns3_hw *hw)
542 {
543 	uint32_t clearval;
544 	uint32_t cmdq_stat_reg;
545 
546 	cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
547 	clearval = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
548 	hns3_write_dev(hw, HNS3_VECTOR0_CMDQ_SRC_REG, clearval);
549 
550 	hns3vf_enable_irq0(hw);
551 }
552 
553 static enum hns3vf_evt_cause
554 hns3vf_check_event_cause(struct hns3_adapter *hns, uint32_t *clearval)
555 {
556 	struct hns3_hw *hw = &hns->hw;
557 	enum hns3vf_evt_cause ret;
558 	uint32_t cmdq_stat_reg;
559 	uint32_t rst_ing_reg;
560 	uint32_t val;
561 
562 	/* Fetch the events from their corresponding regs */
563 	cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
564 	if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
565 		rst_ing_reg = hns3_read_dev(hw, HNS3_FUN_RST_ING);
566 		hns3_warn(hw, "resetting reg: 0x%x", rst_ing_reg);
567 		hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
568 		rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
569 		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
570 		hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT);
571 		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B);
572 		hw->reset.stats.global_cnt++;
573 		hns3_warn(hw, "Global reset detected, clear reset status");
574 
575 		ret = HNS3VF_VECTOR0_EVENT_RST;
576 		goto out;
577 	}
578 
579 	/* Check for vector0 mailbox(=CMDQ RX) event source */
580 	if (BIT(HNS3_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
581 		val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RX_CMDQ_INT_B);
582 		ret = HNS3VF_VECTOR0_EVENT_MBX;
583 		goto out;
584 	}
585 
586 	val = 0;
587 	ret = HNS3VF_VECTOR0_EVENT_OTHER;
588 
589 out:
590 	*clearval = val;
591 	return ret;
592 }
593 
594 static void
595 hns3vf_interrupt_handler(void *param)
596 {
597 	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
598 	struct hns3_adapter *hns = dev->data->dev_private;
599 	struct hns3_hw *hw = &hns->hw;
600 	enum hns3vf_evt_cause event_cause;
601 	uint32_t clearval;
602 
603 	/* Disable interrupt */
604 	hns3vf_disable_irq0(hw);
605 
606 	/* Read out interrupt causes */
607 	event_cause = hns3vf_check_event_cause(hns, &clearval);
608 	/* Clear interrupt causes */
609 	hns3vf_clear_event_cause(hw, clearval);
610 
611 	switch (event_cause) {
612 	case HNS3VF_VECTOR0_EVENT_RST:
613 		hns3_schedule_reset(hns);
614 		break;
615 	case HNS3VF_VECTOR0_EVENT_MBX:
616 		hns3vf_handle_mbx_msg(hw);
617 		break;
618 	default:
619 		break;
620 	}
621 
622 	/* Enable interrupt if it is not caused by reset */
623 	if (event_cause == HNS3VF_VECTOR0_EVENT_MBX ||
624 	    event_cause == HNS3VF_VECTOR0_EVENT_OTHER)
625 		hns3vf_enable_irq0(hw);
626 }
627 
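/*
 * Record whether the PF supports pushing link status changes to the VF.
 * The capability is latched only once, via a compare-and-swap from the
 * 'unknown' state.
 */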
628 void
629 hns3vf_update_push_lsc_cap(struct hns3_hw *hw, bool supported)
630 {
631 	uint16_t val = supported ? HNS3_PF_PUSH_LSC_CAP_SUPPORTED :
632 				   HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
633 	uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
634 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
635 
636 	if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
637 		rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp, val,
638 					  rte_memory_order_acquire, rte_memory_order_acquire);
639 }
640 
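/*
 * Probe whether the PF driver pushes link status changes: send a
 * GET_LINK_STATUS request and poll the mailbox until the capability is known
 * or the ~500 ms timeout expires. If the PF does not advertise the
 * capability, clear the device's LSC flag.
 */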
641 static void
642 hns3vf_get_push_lsc_cap(struct hns3_hw *hw)
643 {
644 #define HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS	500
645 
646 	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
647 	int32_t remain_ms = HNS3_CHECK_PUSH_LSC_CAP_TIMEOUT_MS;
648 	uint16_t val = HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED;
649 	uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN;
650 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
651 	struct hns3_vf_to_pf_msg req;
652 
653 	rte_atomic_store_explicit(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN,
654 			 rte_memory_order_release);
655 
656 	hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0);
657 	(void)hns3vf_mbx_send(hw, &req, false, NULL, 0);
658 
659 	while (remain_ms > 0) {
660 		rte_delay_ms(HNS3_POLL_RESPONE_MS);
661 		 * The probe process may run in interrupt thread context.
662 		 * For example, users attach a device in the secondary process.
663 		 * In that case, the mailbox handling task is blocked. So the
664 		 * driver has to actively handle the HNS3_MBX_LINK_STAT_CHANGE
665 		 * mailbox from the PF driver to get this capability.
666 		 * mailbox from PF driver to get this capability.
667 		 */
668 		hns3vf_handle_mbx_msg(hw);
669 		if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) !=
670 			HNS3_PF_PUSH_LSC_CAP_UNKNOWN)
671 			break;
672 		remain_ms--;
673 	}
674 
675 	/*
676 	 * When the above loop exits, pf_push_lsc_cap can be in one of three
677 	 * states: unknown (the PF did not ack), not_supported, or supported.
678 	 * Configure it as 'not_supported' if it is still in the 'unknown' state.
679 	 */
680 	rte_atomic_compare_exchange_strong_explicit(&vf->pf_push_lsc_cap, &exp, val,
681 				  rte_memory_order_acquire, rte_memory_order_acquire);
682 
683 	if (rte_atomic_load_explicit(&vf->pf_push_lsc_cap, rte_memory_order_acquire) ==
684 		HNS3_PF_PUSH_LSC_CAP_SUPPORTED) {
685 		hns3_info(hw, "detect PF support push link status change!");
686 	} else {
687 		/*
688 		 * The framework already set the RTE_ETH_DEV_INTR_LSC bit
689 		 * because the driver declared RTE_PCI_DRV_INTR_LSC in drv_flags.
690 		 * So clear the RTE_ETH_DEV_INTR_LSC capability here.
691 		 */
692 		dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
693 	}
694 }
695 
696 static int
697 hns3vf_get_capability(struct hns3_hw *hw)
698 {
699 	int ret;
700 
701 	if (hw->revision < PCI_REVISION_ID_HIP09_A) {
702 		hns3_set_default_dev_specifications(hw);
703 		hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_RSV_ONE;
704 		hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_2US;
705 		hw->tso_mode = HNS3_TSO_SW_CAL_PSEUDO_H_CSUM;
706 		hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE1;
707 		hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
708 		hw->rss_info.ipv6_sctp_offload_supported = false;
709 		hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE;
710 		hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_64;
711 		return 0;
712 	}
713 
714 	ret = hns3_query_dev_specifications(hw);
715 	if (ret) {
716 		PMD_INIT_LOG(ERR,
717 			     "failed to query dev specifications, ret = %d",
718 			     ret);
719 		return ret;
720 	}
721 
722 	hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL;
723 	hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US;
724 	hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM;
725 	hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
726 	hw->rss_info.ipv6_sctp_offload_supported = true;
727 	hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE;
728 	hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_128;
729 
730 	return 0;
731 }
732 
733 static int
734 hns3vf_check_tqp_info(struct hns3_hw *hw)
735 {
736 	if (hw->tqps_num == 0) {
737 		PMD_INIT_LOG(ERR, "Get invalid tqps_num(0) from PF.");
738 		return -EINVAL;
739 	}
740 
741 	if (hw->rss_size_max == 0) {
742 		PMD_INIT_LOG(ERR, "Get invalid rss_size_max(0) from PF.");
743 		return -EINVAL;
744 	}
745 
746 	hw->tqps_num = RTE_MIN(hw->rss_size_max, hw->tqps_num);
747 
748 	return 0;
749 }
750 
751 static int
752 hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw)
753 {
754 	struct hns3_vf_to_pf_msg req;
755 	uint8_t resp_msg;
756 	int ret;
757 
758 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN,
759 			 HNS3_MBX_GET_PORT_BASE_VLAN_STATE);
760 	ret = hns3vf_mbx_send(hw, &req, true, &resp_msg, sizeof(resp_msg));
761 	if (ret) {
762 		if (ret == -ETIME) {
763 			/*
764 			 * Failing to get the current port-based VLAN state from
765 			 * the PF driver does not affect the VF driver's basic
766 			 * function. Because the VF driver relies on the hns3 PF
767 			 * kernel ethdev driver, and to avoid introducing
768 			 * compatibility issues with older versions of the PF
769 			 * driver, no failure is returned when the return value
770 			 * is ETIME. This return value covers the following
771 			 * scenarios:
772 			 * 1) the firmware didn't return the result in time
773 			 * 2) the result returned by the firmware is a timeout
774 			 * 3) an older version of the kernel-side PF driver does
775 			 *    not support this mailbox message.
776 			 * For scenarios 1 and 2, it is most likely that a
777 			 * hardware error or a hardware reset has occurred. In
778 			 * this case, these errors will be caught by other functions.
779 			 */
780 			PMD_INIT_LOG(WARNING,
781 				"failed to get PVID state for timeout, maybe "
782 				"kernel side PF driver doesn't support this "
783 				"mailbox message, or firmware didn't respond.");
784 			resp_msg = HNS3_PORT_BASE_VLAN_DISABLE;
785 		} else {
786 			PMD_INIT_LOG(ERR, "failed to get port based VLAN state,"
787 				" ret = %d", ret);
788 			return ret;
789 		}
790 	}
791 	hw->port_base_vlan_cfg.state = resp_msg ?
792 		HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE;
793 	return 0;
794 }
795 
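/*
 * Query the queue configuration from the PF: the first two bytes of the
 * response hold tqps_num and the next two hold rss_size_max.
 */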
796 static int
797 hns3vf_get_queue_info(struct hns3_hw *hw)
798 {
799 #define HNS3VF_TQPS_RSS_INFO_LEN	6
800 	uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN];
801 	struct hns3_vf_to_pf_msg req;
802 	int ret;
803 
804 	hns3vf_mbx_setup(&req, HNS3_MBX_GET_QINFO, 0);
805 	ret = hns3vf_mbx_send(hw, &req, true,
806 			      resp_msg, HNS3VF_TQPS_RSS_INFO_LEN);
807 	if (ret) {
808 		PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret);
809 		return ret;
810 	}
811 
812 	memcpy(&hw->tqps_num, &resp_msg[0], sizeof(uint16_t));
813 	memcpy(&hw->rss_size_max, &resp_msg[2], sizeof(uint16_t));
814 
815 	return hns3vf_check_tqp_info(hw);
816 }
817 
818 static void
819 hns3vf_update_caps(struct hns3_hw *hw, uint32_t caps)
820 {
821 	if (hns3_get_bit(caps, HNS3VF_CAPS_VLAN_FLT_MOD_B))
822 		hns3_set_bit(hw->capability,
823 				HNS3_DEV_SUPPORT_VF_VLAN_FLT_MOD_B, 1);
824 }
825 
826 static int
827 hns3vf_get_num_tc(struct hns3_hw *hw)
828 {
829 	uint8_t num_tc = 0;
830 	uint32_t i;
831 
832 	for (i = 0; i < HNS3_MAX_TC_NUM; i++) {
833 		if (hw->hw_tc_map & BIT(i))
834 			num_tc++;
835 	}
836 	return num_tc;
837 }
838 
839 static int
840 hns3vf_get_basic_info(struct hns3_hw *hw)
841 {
842 	uint8_t resp_msg[HNS3_MBX_MAX_RESP_DATA_SIZE];
843 	struct hns3_basic_info *basic_info;
844 	struct hns3_vf_to_pf_msg req;
845 	int ret;
846 
847 	hns3vf_mbx_setup(&req, HNS3_MBX_GET_BASIC_INFO, 0);
848 	ret = hns3vf_mbx_send(hw, &req, true, resp_msg, sizeof(resp_msg));
849 	if (ret) {
850 		hns3_err(hw, "failed to get basic info from PF, ret = %d.",
851 				ret);
852 		return ret;
853 	}
854 
855 	basic_info = (struct hns3_basic_info *)resp_msg;
856 	hw->hw_tc_map = basic_info->hw_tc_map;
857 	hw->num_tc = hns3vf_get_num_tc(hw);
858 	hw->pf_vf_if_version = basic_info->pf_vf_if_version;
859 	hns3vf_update_caps(hw, basic_info->caps);
860 
861 	return 0;
862 }
863 
864 static int
865 hns3vf_get_host_mac_addr(struct hns3_hw *hw)
866 {
867 	uint8_t host_mac[RTE_ETHER_ADDR_LEN];
868 	struct hns3_vf_to_pf_msg req;
869 	int ret;
870 
871 	hns3vf_mbx_setup(&req, HNS3_MBX_GET_MAC_ADDR, 0);
872 	ret = hns3vf_mbx_send(hw, &req, true, host_mac, RTE_ETHER_ADDR_LEN);
873 	if (ret) {
874 		hns3_err(hw, "Failed to get mac addr from PF: %d", ret);
875 		return ret;
876 	}
877 
878 	memcpy(hw->mac.mac_addr, host_mac, RTE_ETHER_ADDR_LEN);
879 
880 	return 0;
881 }
882 
883 static int
884 hns3vf_get_configuration(struct hns3_hw *hw)
885 {
886 	int ret;
887 
888 	hw->mac.media_type = HNS3_MEDIA_TYPE_NONE;
889 
890 	/* Get device capability */
891 	ret = hns3vf_get_capability(hw);
892 	if (ret) {
893 		PMD_INIT_LOG(ERR, "failed to get device capability: %d.", ret);
894 		return ret;
895 	}
896 
897 	hns3vf_get_push_lsc_cap(hw);
898 
899 	/* Get basic info from PF */
900 	ret = hns3vf_get_basic_info(hw);
901 	if (ret)
902 		return ret;
903 
904 	/* Get queue configuration from PF */
905 	ret = hns3vf_get_queue_info(hw);
906 	if (ret)
907 		return ret;
908 
909 	/* Get user defined VF MAC addr from PF */
910 	ret = hns3vf_get_host_mac_addr(hw);
911 	if (ret)
912 		return ret;
913 
914 	return hns3vf_get_port_base_vlan_filter_state(hw);
915 }
916 
917 static void
918 hns3vf_request_link_info(struct hns3_hw *hw)
919 {
920 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
921 	struct hns3_vf_to_pf_msg req;
922 	bool send_req;
923 	int ret;
924 
925 	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
926 		return;
927 
928 	send_req = vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED ||
929 		   vf->req_link_info_cnt > 0;
930 	if (!send_req)
931 		return;
932 
933 	hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0);
934 	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
935 	if (ret) {
936 		hns3_err(hw, "failed to fetch link status, ret = %d", ret);
937 		return;
938 	}
939 
940 	if (vf->req_link_info_cnt > 0)
941 		vf->req_link_info_cnt--;
942 }
943 
944 void
945 hns3vf_update_link_status(struct hns3_hw *hw, uint8_t link_status,
946 			  uint32_t link_speed, uint8_t link_duplex)
947 {
948 	struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
949 	struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw);
950 	struct hns3_mac *mac = &hw->mac;
951 	int ret;
952 
953 	/*
954 	 * The PF kernel driver may push the link status while the VF driver is
955 	 * resetting. The driver stops the polling job in this case and starts
956 	 * it again after the reset is done.
957 	 * When the polling job is started, the driver gets the initial link
958 	 * status by sending a request to the PF kernel driver, and can then
959 	 * update the link status by processing the PF's link status mailbox message.
960 	 */
961 	if (!rte_atomic_load_explicit(&vf->poll_job_started, rte_memory_order_relaxed))
962 		return;
963 
964 	if (hw->adapter_state != HNS3_NIC_STARTED)
965 		return;
966 
967 	mac->link_status = link_status;
968 	mac->link_speed = link_speed;
969 	mac->link_duplex = link_duplex;
970 	ret = hns3vf_dev_link_update(dev, 0);
971 	if (ret == 0 && dev->data->dev_conf.intr_conf.lsc != 0)
972 		hns3_start_report_lse(dev);
973 }
974 
975 static int
976 hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on)
977 {
978 	struct hns3_mbx_vlan_filter *vlan_filter;
979 	struct hns3_vf_to_pf_msg req = {0};
980 	struct hns3_hw *hw = &hns->hw;
981 
982 	req.code = HNS3_MBX_SET_VLAN;
983 	req.subcode = HNS3_MBX_VLAN_FILTER;
984 	vlan_filter = (struct hns3_mbx_vlan_filter *)req.data;
985 	vlan_filter->is_kill = on ? 0 : 1;
986 	vlan_filter->proto = rte_cpu_to_le_16(RTE_ETHER_TYPE_VLAN);
987 	vlan_filter->vlan_id = rte_cpu_to_le_16(vlan_id);
988 
989 	return hns3vf_mbx_send(hw, &req, true, NULL, 0);
990 }
991 
992 static int
993 hns3vf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
994 {
995 	struct hns3_adapter *hns = dev->data->dev_private;
996 	struct hns3_hw *hw = &hns->hw;
997 	int ret;
998 
999 	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
1000 		hns3_err(hw,
1001 			 "vf set vlan id failed during resetting, vlan_id =%u",
1002 			 vlan_id);
1003 		return -EIO;
1004 	}
1005 	rte_spinlock_lock(&hw->lock);
1006 	ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1007 	rte_spinlock_unlock(&hw->lock);
1008 	if (ret)
1009 		hns3_err(hw, "vf set vlan id failed, vlan_id =%u, ret =%d",
1010 			 vlan_id, ret);
1011 
1012 	return ret;
1013 }
1014 
1015 static int
1016 hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable)
1017 {
1018 	struct hns3_vf_to_pf_msg req;
1019 	uint8_t msg_data;
1020 	int ret;
1021 
1022 	if (!hns3_dev_get_support(hw, VF_VLAN_FLT_MOD))
1023 		return 0;
1024 
1025 	msg_data = enable ? 1 : 0;
1026 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN,
1027 			 HNS3_MBX_ENABLE_VLAN_FILTER);
1028 	memcpy(req.data, &msg_data, sizeof(msg_data));
1029 	ret = hns3vf_mbx_send(hw, &req, true, NULL, 0);
1030 	if (ret)
1031 		hns3_err(hw, "%s vlan filter failed, ret = %d.",
1032 				enable ? "enable" : "disable", ret);
1033 
1034 	return ret;
1035 }
1036 
1037 static int
1038 hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable)
1039 {
1040 	struct hns3_vf_to_pf_msg req;
1041 	uint8_t msg_data;
1042 	int ret;
1043 
1044 	msg_data = enable ? 1 : 0;
1045 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN,
1046 			 HNS3_MBX_VLAN_RX_OFF_CFG);
1047 	memcpy(req.data, &msg_data, sizeof(msg_data));
1048 	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
1049 	if (ret)
1050 		hns3_err(hw, "vf %s strip failed, ret = %d.",
1051 				enable ? "enable" : "disable", ret);
1052 
1053 	return ret;
1054 }
1055 
1056 static int
1057 hns3vf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1058 {
1059 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1060 	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1061 	unsigned int tmp_mask;
1062 	int ret = 0;
1063 
1064 	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed)) {
1065 		hns3_err(hw, "vf set vlan offload failed during resetting, mask = 0x%x",
1066 			 mask);
1067 		return -EIO;
1068 	}
1069 
1070 	tmp_mask = (unsigned int)mask;
1071 
1072 	if (tmp_mask & RTE_ETH_VLAN_FILTER_MASK) {
1073 		rte_spinlock_lock(&hw->lock);
1074 		/* Enable or disable VLAN filter */
1075 		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
1076 			ret = hns3vf_en_vlan_filter(hw, true);
1077 		else
1078 			ret = hns3vf_en_vlan_filter(hw, false);
1079 		rte_spinlock_unlock(&hw->lock);
1080 		if (ret)
1081 			return ret;
1082 	}
1083 
1084 	/* Vlan stripping setting */
1085 	if (tmp_mask & RTE_ETH_VLAN_STRIP_MASK) {
1086 		rte_spinlock_lock(&hw->lock);
1087 		/* Enable or disable VLAN stripping */
1088 		if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
1089 			ret = hns3vf_en_hw_strip_rxvtag(hw, true);
1090 		else
1091 			ret = hns3vf_en_hw_strip_rxvtag(hw, false);
1092 		rte_spinlock_unlock(&hw->lock);
1093 	}
1094 
1095 	return ret;
1096 }
1097 
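/*
 * Walk the VLAN bitmap saved in the ethdev VLAN filter configuration and
 * add (on != 0) or remove (on == 0) every configured VLAN ID via mailbox.
 */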
1098 static int
1099 hns3vf_handle_all_vlan_table(struct hns3_adapter *hns, int on)
1100 {
1101 	struct rte_vlan_filter_conf *vfc;
1102 	struct hns3_hw *hw = &hns->hw;
1103 	uint16_t vlan_id;
1104 	uint64_t vbit;
1105 	uint64_t ids;
1106 	int ret = 0;
1107 	uint32_t i;
1108 
1109 	vfc = &hw->data->vlan_filter_conf;
1110 	for (i = 0; i < RTE_DIM(vfc->ids); i++) {
1111 		if (vfc->ids[i] == 0)
1112 			continue;
1113 		ids = vfc->ids[i];
1114 		while (ids) {
1115 			/*
1116 			 * 64 means the num bits of ids, one bit corresponds to
1117 			 * 64 is the number of bits in ids; each bit corresponds
1118 			 * to one vlan id.
1119 			vlan_id = 64 * i;
1120 			/* count trailing zeroes */
1121 			vbit = ~ids & (ids - 1);
1122 			/* clear least significant bit set */
1123 			ids ^= (ids ^ (ids - 1)) ^ vbit;
1124 			for (; vbit;) {
1125 				vbit >>= 1;
1126 				vlan_id++;
1127 			}
1128 			ret = hns3vf_vlan_filter_configure(hns, vlan_id, on);
1129 			if (ret) {
1130 				hns3_err(hw,
1131 					 "VF handle vlan table failed, ret =%d, on = %d",
1132 					 ret, on);
1133 				return ret;
1134 			}
1135 		}
1136 	}
1137 
1138 	return ret;
1139 }
1140 
1141 static int
1142 hns3vf_remove_all_vlan_table(struct hns3_adapter *hns)
1143 {
1144 	return hns3vf_handle_all_vlan_table(hns, 0);
1145 }
1146 
1147 static int
1148 hns3vf_restore_vlan_conf(struct hns3_adapter *hns)
1149 {
1150 	struct hns3_hw *hw = &hns->hw;
1151 	struct rte_eth_conf *dev_conf;
1152 	bool en;
1153 	int ret;
1154 
1155 	dev_conf = &hw->data->dev_conf;
1156 	en = dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP ? true
1157 								   : false;
1158 	ret = hns3vf_en_hw_strip_rxvtag(hw, en);
1159 	if (ret)
1160 		hns3_err(hw, "VF restore vlan conf fail, en =%d, ret =%d", en,
1161 			 ret);
1162 	return ret;
1163 }
1164 
1165 static int
1166 hns3vf_dev_configure_vlan(struct rte_eth_dev *dev)
1167 {
1168 	struct hns3_adapter *hns = dev->data->dev_private;
1169 	struct rte_eth_dev_data *data = dev->data;
1170 	struct hns3_hw *hw = &hns->hw;
1171 	int ret;
1172 
1173 	if (data->dev_conf.txmode.hw_vlan_reject_tagged ||
1174 	    data->dev_conf.txmode.hw_vlan_reject_untagged ||
1175 	    data->dev_conf.txmode.hw_vlan_insert_pvid) {
1176 		hns3_warn(hw, "hw_vlan_reject_tagged, hw_vlan_reject_untagged "
1177 			      "or hw_vlan_insert_pvid is not supported!");
1178 	}
1179 
1180 	/* Apply vlan offload setting */
1181 	ret = hns3vf_vlan_offload_set(dev, RTE_ETH_VLAN_STRIP_MASK |
1182 					RTE_ETH_VLAN_FILTER_MASK);
1183 	if (ret)
1184 		hns3_err(hw, "dev config vlan offload failed, ret = %d.", ret);
1185 
1186 	return ret;
1187 }
1188 
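/* Notify the PF whether the VF driver is alive via the SET_ALIVE mailbox. */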
1189 static int
1190 hns3vf_set_alive(struct hns3_hw *hw, bool alive)
1191 {
1192 	struct hns3_vf_to_pf_msg req;
1193 	uint8_t msg_data;
1194 
1195 	msg_data = alive ? 1 : 0;
1196 	hns3vf_mbx_setup(&req, HNS3_MBX_SET_ALIVE, 0);
1197 	memcpy(req.data, &msg_data, sizeof(msg_data));
1198 	return hns3vf_mbx_send(hw, &req, false, NULL, 0);
1199 }
1200 
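/*
 * Periodic alarm callback: send a keep-alive mailbox message to the PF and
 * re-arm the alarm with HNS3VF_KEEP_ALIVE_INTERVAL.
 */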
1201 static void
1202 hns3vf_keep_alive_handler(void *param)
1203 {
1204 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1205 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1206 	struct hns3_vf_to_pf_msg req;
1207 	struct hns3_hw *hw = &hns->hw;
1208 	int ret;
1209 
1210 	hns3vf_mbx_setup(&req, HNS3_MBX_KEEP_ALIVE, 0);
1211 	ret = hns3vf_mbx_send(hw, &req, false, NULL, 0);
1212 	if (ret)
1213 		hns3_err(hw, "VF failed to send keep alive cmd, ret = %d",
1214 			 ret);
1215 
1216 	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
1217 			  eth_dev);
1218 }
1219 
1220 static void
1221 hns3vf_service_handler(void *param)
1222 {
1223 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
1224 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1225 	struct hns3_hw *hw = &hns->hw;
1226 
1227 	/*
1228 	 * The link status query and reset processing are executed in the
1229 	 * interrupt thread. When an IMP reset occurs, the IMP will not respond,
1230 	 * and the query operation will time out after 30ms. In the case of
1231 	 * multiple PF/VFs, each query failure timeout causes the IMP reset
1232 	 * interrupt to fail to respond within 100ms.
1233 	 * Before querying the link status, check whether there is a reset
1234 	 * pending, and if so, abandon the query.
1235 	 */
1236 	if (!hns3vf_is_reset_pending(hns)) {
1237 		hns3vf_request_link_info(hw);
1238 		hns3_update_hw_stats(hw);
1239 	} else {
1240 		hns3_warn(hw, "Cancel the query when reset is pending");
1241 	}
1242 
1243 	rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
1244 			  eth_dev);
1245 }
1246 
1247 static void
1248 hns3vf_start_poll_job(struct rte_eth_dev *dev)
1249 {
1250 #define HNS3_REQUEST_LINK_INFO_REMAINS_CNT	3
1251 
1252 	struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1253 
1254 	if (vf->pf_push_lsc_cap == HNS3_PF_PUSH_LSC_CAP_SUPPORTED)
1255 		vf->req_link_info_cnt = HNS3_REQUEST_LINK_INFO_REMAINS_CNT;
1256 
1257 	rte_atomic_store_explicit(&vf->poll_job_started, 1, rte_memory_order_relaxed);
1258 
1259 	hns3vf_service_handler(dev);
1260 }
1261 
1262 static void
1263 hns3vf_stop_poll_job(struct rte_eth_dev *dev)
1264 {
1265 	struct hns3_vf *vf = HNS3_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1266 
1267 	rte_eal_alarm_cancel(hns3vf_service_handler, dev);
1268 
1269 	rte_atomic_store_explicit(&vf->poll_job_started, 0, rte_memory_order_relaxed);
1270 }
1271 
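/*
 * Query the number of MSI-X vectors assigned to this VF from the firmware
 * and fail if it is below the minimum required by the driver.
 */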
1272 static int
1273 hns3_query_vf_resource(struct hns3_hw *hw)
1274 {
1275 	struct hns3_vf_res_cmd *req;
1276 	struct hns3_cmd_desc desc;
1277 	uint16_t num_msi;
1278 	int ret;
1279 
1280 	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_VF_RSRC, true);
1281 	ret = hns3_cmd_send(hw, &desc, 1);
1282 	if (ret) {
1283 		hns3_err(hw, "query vf resource failed, ret = %d", ret);
1284 		return ret;
1285 	}
1286 
1287 	req = (struct hns3_vf_res_cmd *)desc.data;
1288 	num_msi = hns3_get_field(rte_le_to_cpu_16(req->vf_intr_vector_number),
1289 				 HNS3_VF_VEC_NUM_M, HNS3_VF_VEC_NUM_S);
1290 	if (num_msi < HNS3_MIN_VECTOR_NUM) {
1291 		hns3_err(hw, "Just %u msi resources, not enough for vf(min:%d)",
1292 			 num_msi, HNS3_MIN_VECTOR_NUM);
1293 		return -EINVAL;
1294 	}
1295 
1296 	hw->num_msi = num_msi;
1297 
1298 	return 0;
1299 }
1300 
1301 static int
1302 hns3vf_init_hardware(struct hns3_adapter *hns)
1303 {
1304 	struct hns3_hw *hw = &hns->hw;
1305 	uint16_t mtu = hw->data->mtu;
1306 	int ret;
1307 
1308 	ret = hns3vf_set_promisc_mode(hw, true, false, false);
1309 	if (ret)
1310 		return ret;
1311 
1312 	ret = hns3vf_config_mtu(hw, mtu);
1313 	if (ret)
1314 		goto err_init_hardware;
1315 
1316 	ret = hns3vf_vlan_filter_configure(hns, 0, 1);
1317 	if (ret) {
1318 		PMD_INIT_LOG(ERR, "Failed to initialize VLAN config: %d", ret);
1319 		goto err_init_hardware;
1320 	}
1321 
1322 	ret = hns3_config_gro(hw, false);
1323 	if (ret) {
1324 		PMD_INIT_LOG(ERR, "Failed to config gro: %d", ret);
1325 		goto err_init_hardware;
1326 	}
1327 
1328 	/*
1329 	 * During initialization, all hardware mappings between queues and
1330 	 * interrupt vectors need to be cleared, so that errors caused by
1331 	 * residual configurations, such as unexpected interrupts, can be
1332 	 * avoided.
1333 	 */
1334 	ret = hns3_init_ring_with_vector(hw);
1335 	if (ret) {
1336 		PMD_INIT_LOG(ERR, "Failed to init ring intr vector: %d", ret);
1337 		goto err_init_hardware;
1338 	}
1339 
1340 	return 0;
1341 
1342 err_init_hardware:
1343 	(void)hns3vf_set_promisc_mode(hw, false, false, false);
1344 	return ret;
1345 }
1346 
1347 static int
1348 hns3vf_clear_vport_list(struct hns3_hw *hw)
1349 {
1350 	struct hns3_vf_to_pf_msg req;
1351 
1352 	hns3vf_mbx_setup(&req, HNS3_MBX_HANDLE_VF_TBL,
1353 			 HNS3_MBX_VPORT_LIST_CLEAR);
1354 	return hns3vf_mbx_send(hw, &req, false, NULL, 0);
1355 }
1356 
1357 static int
1358 hns3vf_init_vf(struct rte_eth_dev *eth_dev)
1359 {
1360 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1361 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1362 	struct hns3_hw *hw = &hns->hw;
1363 	int ret;
1364 
1365 	PMD_INIT_FUNC_TRACE();
1366 
1367 	/* Get hardware io base address from pcie BAR2 IO space */
1368 	hw->io_base = pci_dev->mem_resource[2].addr;
1369 
1370 	ret = hns3_get_pci_revision_id(hw, &hw->revision);
1371 	if (ret)
1372 		return ret;
1373 
1374 	/* Firmware command queue initialize */
1375 	ret = hns3_cmd_init_queue(hw);
1376 	if (ret) {
1377 		PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
1378 		goto err_cmd_init_queue;
1379 	}
1380 
1381 	/* Firmware command initialize */
1382 	ret = hns3_cmd_init(hw);
1383 	if (ret) {
1384 		PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
1385 		goto err_cmd_init;
1386 	}
1387 
1388 	hns3_tx_push_init(eth_dev);
1389 
1390 	/* Get VF resource */
1391 	ret = hns3_query_vf_resource(hw);
1392 	if (ret)
1393 		goto err_cmd_init;
1394 
1395 	rte_spinlock_init(&hw->mbx_resp.lock);
1396 
1397 	hns3vf_clear_event_cause(hw, 0);
1398 
1399 	ret = rte_intr_callback_register(pci_dev->intr_handle,
1400 					 hns3vf_interrupt_handler, eth_dev);
1401 	if (ret) {
1402 		PMD_INIT_LOG(ERR, "Failed to register intr: %d", ret);
1403 		goto err_intr_callback_register;
1404 	}
1405 
1406 	/* Enable interrupt */
1407 	rte_intr_enable(pci_dev->intr_handle);
1408 	hns3vf_enable_irq0(hw);
1409 
1410 	/* Get configuration from PF */
1411 	ret = hns3vf_get_configuration(hw);
1412 	if (ret) {
1413 		PMD_INIT_LOG(ERR, "Failed to fetch configuration: %d", ret);
1414 		goto err_get_config;
1415 	}
1416 
1417 	ret = hns3_stats_init(hw);
1418 	if (ret)
1419 		goto err_get_config;
1420 
1421 	ret = hns3_queue_to_tc_mapping(hw, hw->tqps_num, hw->tqps_num);
1422 	if (ret) {
1423 		PMD_INIT_LOG(ERR, "failed to set tc info, ret = %d.", ret);
1424 		goto err_set_tc_queue;
1425 	}
1426 
1427 	ret = hns3vf_clear_vport_list(hw);
1428 	if (ret) {
1429 		PMD_INIT_LOG(ERR, "Failed to clear tbl list: %d", ret);
1430 		goto err_set_tc_queue;
1431 	}
1432 
1433 	ret = hns3vf_init_hardware(hns);
1434 	if (ret)
1435 		goto err_set_tc_queue;
1436 
1437 	hns3_rss_set_default_args(hw);
1438 
1439 	ret = hns3vf_set_alive(hw, true);
1440 	if (ret) {
1441 		PMD_INIT_LOG(ERR, "Failed to send alive to PF: %d", ret);
1442 		goto err_set_tc_queue;
1443 	}
1444 
1445 	return 0;
1446 
1447 err_set_tc_queue:
1448 	hns3_stats_uninit(hw);
1449 
1450 err_get_config:
1451 	hns3vf_disable_irq0(hw);
1452 	rte_intr_disable(pci_dev->intr_handle);
1453 	hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler,
1454 			     eth_dev);
1455 err_intr_callback_register:
1456 err_cmd_init:
1457 	hns3_cmd_uninit(hw);
1458 	hns3_cmd_destroy_queue(hw);
1459 err_cmd_init_queue:
1460 	hw->io_base = NULL;
1461 
1462 	return ret;
1463 }
1464 
1465 static void
1466 hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
1467 {
1468 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1469 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1470 	struct hns3_hw *hw = &hns->hw;
1471 
1472 	PMD_INIT_FUNC_TRACE();
1473 
1474 	hns3_rss_uninit(hns);
1475 	(void)hns3_config_gro(hw, false);
1476 	(void)hns3vf_set_alive(hw, false);
1477 	(void)hns3vf_set_promisc_mode(hw, false, false, false);
1478 	hns3_flow_uninit(eth_dev);
1479 	hns3_stats_uninit(hw);
1480 	hns3vf_disable_irq0(hw);
1481 	rte_intr_disable(pci_dev->intr_handle);
1482 	hns3_intr_unregister(pci_dev->intr_handle, hns3vf_interrupt_handler,
1483 			     eth_dev);
1484 	hns3_cmd_uninit(hw);
1485 	hns3_cmd_destroy_queue(hw);
1486 	hw->io_base = NULL;
1487 }
1488 
1489 static int
1490 hns3vf_do_stop(struct hns3_adapter *hns)
1491 {
1492 	struct hns3_hw *hw = &hns->hw;
1493 	int ret;
1494 
1495 	hw->mac.link_status = RTE_ETH_LINK_DOWN;
1496 
1497 	/*
1498 	 * The "hns3vf_do_stop" function will also be called by .stop_service to
1499 	 * prepare reset. At the time of global or IMP reset, the command cannot
1500 	 * be sent to stop the tx/rx queues. The mbuf in Tx/Rx queues may be
1501 	 * accessed during the reset process. So the mbuf can not be released
1502 	 * during reset and is required to be released after the reset is
1503 	 * completed.
1504 	 */
1505 	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0)
1506 		hns3_dev_release_mbufs(hns);
1507 
1508 	if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0) {
1509 		hns3_configure_all_mac_addr(hns, true);
1510 		ret = hns3_reset_all_tqps(hns);
1511 		if (ret) {
1512 			hns3_err(hw, "failed to reset all queues ret = %d",
1513 				 ret);
1514 			return ret;
1515 		}
1516 	}
1517 	return 0;
1518 }
1519 
1520 static int
1521 hns3vf_dev_stop(struct rte_eth_dev *dev)
1522 {
1523 	struct hns3_adapter *hns = dev->data->dev_private;
1524 	struct hns3_hw *hw = &hns->hw;
1525 
1526 	PMD_INIT_FUNC_TRACE();
1527 	dev->data->dev_started = 0;
1528 
1529 	hw->adapter_state = HNS3_NIC_STOPPING;
1530 	hns3_stop_rxtx_datapath(dev);
1531 
1532 	rte_spinlock_lock(&hw->lock);
1533 	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed) == 0) {
1534 		hns3_stop_tqps(hw);
1535 		hns3vf_do_stop(hns);
1536 		hns3_unmap_rx_interrupt(dev);
1537 		hw->adapter_state = HNS3_NIC_CONFIGURED;
1538 	}
1539 	hns3_rx_scattered_reset(dev);
1540 	hns3vf_stop_poll_job(dev);
1541 	hns3_stop_report_lse(dev);
1542 	rte_spinlock_unlock(&hw->lock);
1543 
1544 	return 0;
1545 }
1546 
1547 static int
1548 hns3vf_dev_close(struct rte_eth_dev *eth_dev)
1549 {
1550 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1551 	struct hns3_hw *hw = &hns->hw;
1552 	int ret = 0;
1553 
1554 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1555 		hns3_mp_uninit(eth_dev);
1556 		return 0;
1557 	}
1558 
1559 	if (hw->adapter_state == HNS3_NIC_STARTED)
1560 		ret = hns3vf_dev_stop(eth_dev);
1561 
1562 	hw->adapter_state = HNS3_NIC_CLOSING;
1563 	hns3_reset_abort(hns);
1564 	hw->adapter_state = HNS3_NIC_CLOSED;
1565 	rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
1566 	hns3_configure_all_mc_mac_addr(hns, true);
1567 	hns3vf_remove_all_vlan_table(hns);
1568 	hns3vf_uninit_vf(eth_dev);
1569 	hns3_free_all_queues(eth_dev);
1570 	rte_free(hw->reset.wait_data);
1571 	hns3_mp_uninit(eth_dev);
1572 	hns3_warn(hw, "Close port %u finished", hw->data->port_id);
1573 
1574 	return ret;
1575 }
1576 
1577 static int
1578 hns3vf_dev_link_update(struct rte_eth_dev *eth_dev,
1579 		       __rte_unused int wait_to_complete)
1580 {
1581 	struct hns3_adapter *hns = eth_dev->data->dev_private;
1582 	struct hns3_hw *hw = &hns->hw;
1583 	struct hns3_mac *mac = &hw->mac;
1584 	struct rte_eth_link new_link;
1585 
1586 	memset(&new_link, 0, sizeof(new_link));
1587 	switch (mac->link_speed) {
1588 	case RTE_ETH_SPEED_NUM_10M:
1589 	case RTE_ETH_SPEED_NUM_100M:
1590 	case RTE_ETH_SPEED_NUM_1G:
1591 	case RTE_ETH_SPEED_NUM_10G:
1592 	case RTE_ETH_SPEED_NUM_25G:
1593 	case RTE_ETH_SPEED_NUM_40G:
1594 	case RTE_ETH_SPEED_NUM_50G:
1595 	case RTE_ETH_SPEED_NUM_100G:
1596 	case RTE_ETH_SPEED_NUM_200G:
1597 		if (mac->link_status)
1598 			new_link.link_speed = mac->link_speed;
1599 		break;
1600 	default:
1601 		if (mac->link_status)
1602 			new_link.link_speed = RTE_ETH_SPEED_NUM_UNKNOWN;
1603 		break;
1604 	}
1605 
1606 	if (!mac->link_status)
1607 		new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
1608 
1609 	new_link.link_duplex = mac->link_duplex;
1610 	new_link.link_status = mac->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
1611 	new_link.link_autoneg =
1612 	    !(eth_dev->data->dev_conf.link_speeds & RTE_ETH_LINK_SPEED_FIXED);
1613 
1614 	return rte_eth_linkstatus_set(eth_dev, &new_link);
1615 }
1616 
1617 static int
1618 hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
1619 {
1620 	struct hns3_hw *hw = &hns->hw;
1621 	uint16_t nb_rx_q = hw->data->nb_rx_queues;
1622 	uint16_t nb_tx_q = hw->data->nb_tx_queues;
1623 	int ret;
1624 
1625 	ret = hns3_queue_to_tc_mapping(hw, nb_rx_q, nb_tx_q);
1626 	if (ret)
1627 		return ret;
1628 
1629 	hns3_enable_rxd_adv_layout(hw);
1630 
1631 	ret = hns3_init_queues(hns, reset_queue);
1632 	if (ret) {
1633 		hns3_err(hw, "failed to init queues, ret = %d.", ret);
1634 		return ret;
1635 	}
1636 
1637 	return hns3_restore_filter(hns);
1638 }
1639 
1640 static int
1641 hns3vf_dev_start(struct rte_eth_dev *dev)
1642 {
1643 	struct hns3_adapter *hns = dev->data->dev_private;
1644 	struct hns3_hw *hw = &hns->hw;
1645 	int ret;
1646 
1647 	PMD_INIT_FUNC_TRACE();
1648 	if (rte_atomic_load_explicit(&hw->reset.resetting, rte_memory_order_relaxed))
1649 		return -EBUSY;
1650 
1651 	rte_spinlock_lock(&hw->lock);
1652 	hw->adapter_state = HNS3_NIC_STARTING;
1653 	ret = hns3vf_do_start(hns, true);
1654 	if (ret) {
1655 		hw->adapter_state = HNS3_NIC_CONFIGURED;
1656 		rte_spinlock_unlock(&hw->lock);
1657 		return ret;
1658 	}
1659 	ret = hns3_map_rx_interrupt(dev);
1660 	if (ret)
1661 		goto map_rx_inter_err;
1662 
1663 	/*
1664 	 * There are three registers used to control the status of a TQP
1665 	 * (which contains a pair of Tx and Rx queues) in the new version of the
1666 	 * network engine. One controls the enabling of the Tx queue, another
1667 	 * controls the enabling of the Rx queue, and the last is the master
1668 	 * switch used to control the enabling of the TQP. The Tx register and
1669 	 * TQP register must be enabled at the same time to enable a Tx queue.
1670 	 * The same applies to the Rx queue. For the older network engine, this
1671 	 * function only refreshes the enabled flag and is used to update the
1672 	 * status of the queue in the DPDK framework.
1673 	 */
1674 	ret = hns3_start_all_txqs(dev);
1675 	if (ret)
1676 		goto map_rx_inter_err;
1677 
1678 	ret = hns3_start_all_rxqs(dev);
1679 	if (ret)
1680 		goto start_all_rxqs_fail;
1681 
1682 	hw->adapter_state = HNS3_NIC_STARTED;
1683 	rte_spinlock_unlock(&hw->lock);
1684 
1685 	hns3_rx_scattered_calc(dev);
1686 	hns3_start_rxtx_datapath(dev);
1687 
1688 	/* Enable interrupt of all rx queues before enabling queues */
1689 	hns3_dev_all_rx_queue_intr_enable(hw, true);
1690 	hns3_start_tqps(hw);
1691 
1692 	if (dev->data->dev_conf.intr_conf.lsc != 0)
1693 		hns3vf_dev_link_update(dev, 0);
1694 	hns3vf_start_poll_job(dev);
1695 
1696 	return ret;
1697 
1698 start_all_rxqs_fail:
1699 	hns3_stop_all_txqs(dev);
1700 map_rx_inter_err:
1701 	(void)hns3vf_do_stop(hns);
1702 	hw->adapter_state = HNS3_NIC_CONFIGURED;
1703 	rte_spinlock_unlock(&hw->lock);
1704 
1705 	return ret;
1706 }
1707 
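/*
 * Check the reset-in-progress registers: for a VF reset, look at the VF
 * reset bit; otherwise check the function/global/core/IMP reset bits.
 */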
1708 static bool
1709 is_vf_reset_done(struct hns3_hw *hw)
1710 {
1711 #define HNS3_FUN_RST_ING_BITS \
1712 	(BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) | \
1713 	 BIT(HNS3_VECTOR0_CORERESET_INT_B) | \
1714 	 BIT(HNS3_VECTOR0_IMPRESET_INT_B) | \
1715 	 BIT(HNS3_VECTOR0_FUNCRESET_INT_B))
1716 
1717 	uint32_t val;
1718 
1719 	if (hw->reset.level == HNS3_VF_RESET) {
1720 		val = hns3_read_dev(hw, HNS3_VF_RST_ING);
1721 		if (val & HNS3_VF_RST_ING_BIT)
1722 			return false;
1723 	} else {
1724 		val = hns3_read_dev(hw, HNS3_FUN_RST_ING);
1725 		if (val & HNS3_FUN_RST_ING_BITS)
1726 			return false;
1727 	}
1728 	return true;
1729 }
1730 
1731 static enum hns3_reset_level
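/* Detect a pending VF reset by checking the vector0 CMDQ status register. */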
1732 hns3vf_detect_reset_event(struct hns3_hw *hw)
1733 {
1734 	enum hns3_reset_level reset = HNS3_NONE_RESET;
1735 	uint32_t cmdq_stat_reg;
1736 
1737 	cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG);
1738 	if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg)
1739 		reset = HNS3_VF_RESET;
1740 
1741 	return reset;
1742 }
1743 
1744 bool
1745 hns3vf_is_reset_pending(struct hns3_adapter *hns)
1746 {
1747 	enum hns3_reset_level last_req;
1748 	struct hns3_hw *hw = &hns->hw;
1749 	enum hns3_reset_level new_req;
1750 
1751 	/*
1752 	 * According to the protocol of PCIe, FLR to a PF device resets the PF
1753 	 * state as well as the SR-IOV extended capability including VF Enable
1754 	 * which means that VFs no longer exist.
1755 	 *
1756 	 * HNS3_VF_FULL_RESET means the PF device is in FLR reset. When the PF
1757 	 * device is in the FLR stage, the register state of the VF device is
1758 	 * not reliable, so register state detection cannot be carried out. In
1759 	 * this case, we just ignore the register states and return false to
1760 	 * indicate that there are no other reset states to be processed by the driver.
1761 	 */
1762 	if (hw->reset.level == HNS3_VF_FULL_RESET)
1763 		return false;
1764 
1765 	/*
1766 	 * Only the primary process can handle the reset event,
1767 	 * so don't check for reset events in the secondary process.
1768 	 */
1769 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1770 		return false;
1771 
1772 	new_req = hns3vf_detect_reset_event(hw);
1773 	if (new_req == HNS3_NONE_RESET)
1774 		return false;
1775 
1776 	last_req = hns3vf_get_reset_level(hw, &hw->reset.pending);
1777 	if (last_req == HNS3_NONE_RESET || last_req < new_req) {
1778 		rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
1779 		hns3_schedule_delayed_reset(hns);
1780 		hns3_warn(hw, "High level reset detected, delay do reset");
1781 		return true;
1782 	}
1783 
1784 	return false;
1785 }
1786 
1787 static int
1788 hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
1789 {
1790 #define HNS3_WAIT_PF_RESET_READY_TIME 5
1791 	struct hns3_hw *hw = &hns->hw;
1792 	struct hns3_wait_data *wait_data = hw->reset.wait_data;
1793 	struct timeval tv;
1794 
1795 	if (wait_data->result == HNS3_WAIT_SUCCESS) {
1796 		/*
1797 		 * After the VF reset is ready, the PF may not have finished its
1798 		 * own reset processing. A mailbox sent from the VF to the PF may
1799 		 * fail during the PF reset, so it is better to add an extra delay.
1800 		 */
1801 		if (hw->reset.level == HNS3_VF_FUNC_RESET ||
1802 		    hw->reset.level == HNS3_FLR_RESET)
1803 			return 0;
1804 		/* Reset retry process, no need to add extra delay. */
1805 		if (hw->reset.attempts)
1806 			return 0;
1807 		if (wait_data->check_completion == NULL)
1808 			return 0;
1809 
1810 		wait_data->check_completion = NULL;
1811 		wait_data->interval = HNS3_WAIT_PF_RESET_READY_TIME *
1812 			MSEC_PER_SEC * USEC_PER_MSEC;
1813 		wait_data->count = 1;
1814 		wait_data->result = HNS3_WAIT_REQUEST;
1815 		rte_eal_alarm_set(wait_data->interval, hns3_wait_callback,
1816 				  wait_data);
1817 		hns3_warn(hw, "hardware is ready, delay %d sec for PF reset to complete",
1818 				HNS3_WAIT_PF_RESET_READY_TIME);
1819 		return -EAGAIN;
1820 	} else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
1821 		hns3_clock_gettime(&tv);
1822 		hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
1823 			  tv.tv_sec, tv.tv_usec);
1824 		return -ETIME;
1825 	} else if (wait_data->result == HNS3_WAIT_REQUEST)
1826 		return -EAGAIN;
1827 
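	/* First invocation: arm the periodic poll of the reset status register. */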
1828 	wait_data->hns = hns;
1829 	wait_data->check_completion = is_vf_reset_done;
1830 	wait_data->end_ms = (uint64_t)HNS3VF_RESET_WAIT_CNT *
1831 				HNS3VF_RESET_WAIT_MS + hns3_clock_gettime_ms();
1832 	wait_data->interval = HNS3VF_RESET_WAIT_MS * USEC_PER_MSEC;
1833 	wait_data->count = HNS3VF_RESET_WAIT_CNT;
1834 	wait_data->result = HNS3_WAIT_REQUEST;
1835 	rte_eal_alarm_set(wait_data->interval, hns3_wait_callback, wait_data);
1836 	return -EAGAIN;
1837 }
1838 
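/*
 * Reset framework step: for a VF function reset, request the reset from the
 * PF via mailbox first; in all cases stop using the command queue until the
 * device is reinitialised.
 */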
1839 static int
1840 hns3vf_prepare_reset(struct hns3_adapter *hns)
1841 {
1842 	struct hns3_vf_to_pf_msg req;
1843 	struct hns3_hw *hw = &hns->hw;
1844 	int ret;
1845 
1846 	if (hw->reset.level == HNS3_VF_FUNC_RESET) {
1847 		hns3vf_mbx_setup(&req, HNS3_MBX_RESET, 0);
1848 		ret = hns3vf_mbx_send(hw, &req, true, NULL, 0);
1849 		if (ret)
1850 			return ret;
1851 	}
1852 	rte_atomic_store_explicit(&hw->reset.disable_cmd, 1, rte_memory_order_relaxed);
1853 
1854 	return 0;
1855 }
1856 
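/*
 * Reset framework step: quiesce the device before the reset is executed.
 * Stop link polling and the Rx/Tx datapath, stop the queues, cancel the
 * keep-alive alarm and, if the command queue is still usable, remove the
 * multicast MAC entries that hardware cannot clean up selectively.
 */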
1857 static int
1858 hns3vf_stop_service(struct hns3_adapter *hns)
1859 {
1860 	struct hns3_hw *hw = &hns->hw;
1861 	struct rte_eth_dev *eth_dev;
1862 
1863 	eth_dev = &rte_eth_devices[hw->data->port_id];
1864 	if (hw->adapter_state == HNS3_NIC_STARTED) {
1865 		/*
1866 		 * Update the link status before hns3vf_stop_poll_job, because
1867 		 * updating the link status depends on the polling job existing.
1868 		 */
1869 		hns3vf_update_link_status(hw, RTE_ETH_LINK_DOWN, hw->mac.link_speed,
1870 					  hw->mac.link_duplex);
1871 		hns3vf_stop_poll_job(eth_dev);
1872 	}
1873 	hw->mac.link_status = RTE_ETH_LINK_DOWN;
1874 
1875 	hns3_stop_rxtx_datapath(eth_dev);
1876 
1877 	rte_spinlock_lock(&hw->lock);
1878 	if (hw->adapter_state == HNS3_NIC_STARTED ||
1879 	    hw->adapter_state == HNS3_NIC_STOPPING) {
1880 		hns3_enable_all_queues(hw, false);
1881 		hns3vf_do_stop(hns);
1882 		hw->reset.mbuf_deferred_free = true;
1883 	} else
1884 		hw->reset.mbuf_deferred_free = false;
1885 
1886 	rte_eal_alarm_cancel(hns3vf_keep_alive_handler, eth_dev);
1887 
1888 	/*
1889 	 * It is cumbersome for hardware to pick-and-choose entries for deletion
1890 	 * from table space. Hence, for a function reset, software intervention is
1891 	 * required to delete the entries.
1892 	 */
1893 	if (rte_atomic_load_explicit(&hw->reset.disable_cmd, rte_memory_order_relaxed) == 0)
1894 		hns3_configure_all_mc_mac_addr(hns, true);
1895 	rte_spinlock_unlock(&hw->lock);
1896 
1897 	return 0;
1898 }
1899 
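/*
 * Reset framework step: resume normal operation after the reset. Restart the
 * Rx/Tx datapath and the keep-alive alarm and, if the port was started,
 * restart link polling, re-enable Rx interrupts and bring the queues back up.
 */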
1900 static int
1901 hns3vf_start_service(struct hns3_adapter *hns)
1902 {
1903 	struct hns3_hw *hw = &hns->hw;
1904 	struct rte_eth_dev *eth_dev;
1905 
1906 	eth_dev = &rte_eth_devices[hw->data->port_id];
1907 	hns3_start_rxtx_datapath(eth_dev);
1908 
1909 	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
1910 			  eth_dev);
1911 
1912 	if (hw->adapter_state == HNS3_NIC_STARTED) {
1913 		hns3vf_start_poll_job(eth_dev);
1914 
1915 		/* Enable the interrupts of all Rx queues before enabling the queues */
1916 		hns3_dev_all_rx_queue_intr_enable(hw, true);
1917 		/*
1918 		 * The enable state of each rxq and txq needs to be recovered
1919 		 * after reset, so restore it before enabling all tqps.
1920 		 */
1921 		hns3_restore_tqp_enable_state(hw);
1922 		/*
1923 		 * When initialization is finished, enable the queues to receive
1924 		 * and transmit packets.
1925 		 */
1926 		hns3_enable_all_queues(hw, true);
1927 	}
1928 
1929 	return 0;
1930 }
1931 
1932 static int
1933 hns3vf_check_default_mac_change(struct hns3_hw *hw)
1934 {
1935 	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
1936 	struct rte_ether_addr *hw_mac;
1937 	int ret;
1938 
1939 	/*
1940 	 * The hns3 PF kernel ethdev driver supports setting the VF MAC address
1941 	 * on the host with the "ip link set ..." command. If the PF kernel
1942 	 * driver sets the MAC address of a VF device after that VF device has
1943 	 * been initialized, the PF driver notifies the VF driver to reset the
1944 	 * VF device so that the new MAC address takes effect immediately. The
1945 	 * hns3 VF PMD should therefore check whether the MAC address has been
1946 	 * changed by the PF kernel ethdev driver; if it has, the VF driver
1947 	 * should program the hardware with the new MAC address during the
1948 	 * hardware configuration recovery stage of the reset process.
1949 	 */
1950 	ret = hns3vf_get_host_mac_addr(hw);
1951 	if (ret)
1952 		return ret;
1953 
1954 	hw_mac = (struct rte_ether_addr *)hw->mac.mac_addr;
1955 	ret = rte_is_zero_ether_addr(hw_mac);
1956 	if (ret) {
1957 		rte_ether_addr_copy(&hw->data->mac_addrs[0], hw_mac);
1958 	} else {
1959 		ret = rte_is_same_ether_addr(&hw->data->mac_addrs[0], hw_mac);
1960 		if (!ret) {
1961 			rte_ether_addr_copy(hw_mac, &hw->data->mac_addrs[0]);
1962 			hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
1963 					      &hw->data->mac_addrs[0]);
1964 			hns3_warn(hw, "Default MAC address has been changed to:"
1965 				  " %s by the host PF kernel ethdev driver",
1966 				  mac_str);
1967 		}
1968 	}
1969 
1970 	return 0;
1971 }
1972 
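/*
 * Reset framework step: restore the software configuration to hardware after
 * reinitialisation, i.e. MAC addresses, promiscuous mode, VLAN configuration,
 * Rx interrupt mapping and GRO state, then notify the PF that the VF is alive.
 */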
1973 static int
1974 hns3vf_restore_conf(struct hns3_adapter *hns)
1975 {
1976 	struct hns3_hw *hw = &hns->hw;
1977 	int ret;
1978 
1979 	ret = hns3vf_check_default_mac_change(hw);
1980 	if (ret)
1981 		return ret;
1982 
1983 	ret = hns3_configure_all_mac_addr(hns, false);
1984 	if (ret)
1985 		return ret;
1986 
1987 	ret = hns3_configure_all_mc_mac_addr(hns, false);
1988 	if (ret)
1989 		goto err_mc_mac;
1990 
1991 	ret = hns3vf_restore_promisc(hns);
1992 	if (ret)
1993 		goto err_vlan_table;
1994 
1995 	ret = hns3vf_restore_vlan_conf(hns);
1996 	if (ret)
1997 		goto err_vlan_table;
1998 
1999 	ret = hns3vf_get_port_base_vlan_filter_state(hw);
2000 	if (ret)
2001 		goto err_vlan_table;
2002 
2003 	ret = hns3_restore_rx_interrupt(hw);
2004 	if (ret)
2005 		goto err_vlan_table;
2006 
2007 	ret = hns3_restore_gro_conf(hw);
2008 	if (ret)
2009 		goto err_vlan_table;
2010 
2011 	if (hw->adapter_state == HNS3_NIC_STARTED) {
2012 		ret = hns3vf_do_start(hns, false);
2013 		if (ret)
2014 			goto err_vlan_table;
2015 		hns3_info(hw, "hns3vf dev restart successful!");
2016 	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
2017 		hw->adapter_state = HNS3_NIC_CONFIGURED;
2018 
2019 	ret = hns3vf_set_alive(hw, true);
2020 	if (ret) {
2021 		hns3_err(hw, "failed to send VF alive to PF: %d", ret);
2022 		goto err_vlan_table;
2023 	}
2024 
2025 	return 0;
2026 
2027 err_vlan_table:
2028 	hns3_configure_all_mc_mac_addr(hns, true);
2029 err_mc_mac:
2030 	hns3_configure_all_mac_addr(hns, true);
2031 	return ret;
2032 }
2033 
2034 static enum hns3_reset_level
2035 hns3vf_get_reset_level(struct hns3_hw *hw, RTE_ATOMIC(uint64_t) *levels)
2036 {
2037 	enum hns3_reset_level reset_level;
2038 
2039 	/* return the highest priority reset level amongst all */
2040 	if (hns3_atomic_test_bit(HNS3_VF_RESET, levels))
2041 		reset_level = HNS3_VF_RESET;
2042 	else if (hns3_atomic_test_bit(HNS3_VF_FULL_RESET, levels))
2043 		reset_level = HNS3_VF_FULL_RESET;
2044 	else if (hns3_atomic_test_bit(HNS3_VF_PF_FUNC_RESET, levels))
2045 		reset_level = HNS3_VF_PF_FUNC_RESET;
2046 	else if (hns3_atomic_test_bit(HNS3_VF_FUNC_RESET, levels))
2047 		reset_level = HNS3_VF_FUNC_RESET;
2048 	else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
2049 		reset_level = HNS3_FLR_RESET;
2050 	else
2051 		reset_level = HNS3_NONE_RESET;
2052 
2053 	if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
2054 		return HNS3_NONE_RESET;
2055 
2056 	return reset_level;
2057 }
2058 
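/*
 * Delayed work that drives the reset handling: process a deferred interrupt
 * if one is pending, then run hns3_reset_process() for the highest pending
 * reset level and warn when the whole sequence took too long.
 */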
2059 static void
2060 hns3vf_reset_service(void *param)
2061 {
2062 	struct hns3_adapter *hns = (struct hns3_adapter *)param;
2063 	struct hns3_hw *hw = &hns->hw;
2064 	enum hns3_reset_level reset_level;
2065 	struct timeval tv_delta;
2066 	struct timeval tv_start;
2067 	struct timeval tv;
2068 	uint64_t msec;
2069 
2070 	/*
2071 	 * If the interrupt was not triggered within the delay time it may
2072 	 * have been lost, so handle the interrupt here in order to recover
2073 	 * from the error.
2074 	 */
2075 	if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
2076 			    SCHEDULE_DEFERRED) {
2077 		rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_REQUESTED,
2078 				 rte_memory_order_relaxed);
2079 		hns3_err(hw, "Handling interrupts in delayed tasks");
2080 		hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
2081 		reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2082 		if (reset_level == HNS3_NONE_RESET) {
2083 			hns3_err(hw, "No reset level is set, try global reset");
2084 			hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
2085 		}
2086 	}
2087 	rte_atomic_store_explicit(&hw->reset.schedule, SCHEDULE_NONE, rte_memory_order_relaxed);
2088 
2089 	/*
2090 	 * A hardware reset has been notified; now poll and check whether the
2091 	 * hardware has actually completed the reset sequence.
2092 	 */
2093 	reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
2094 	if (reset_level != HNS3_NONE_RESET) {
2095 		hns3_clock_gettime(&tv_start);
2096 		hns3_reset_process(hns, reset_level);
2097 		hns3_clock_gettime(&tv);
2098 		timersub(&tv, &tv_start, &tv_delta);
2099 		msec = hns3_clock_calctime_ms(&tv_delta);
2100 		if (msec > HNS3_RESET_PROCESS_MS)
2101 			hns3_err(hw, "%d handle long time delta %" PRIu64
2102 				 " ms time=%ld.%.6ld",
2103 				 hw->reset.level, msec, tv.tv_sec, tv.tv_usec);
2104 	}
2105 }
2106 
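/*
 * Reset framework step: reinitialise the device after the hardware reset has
 * completed. For a full (FLR) reset, bus mastering and MSI-X must be restored
 * before the command queue, queues and hardware are initialised again.
 */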
2107 static int
2108 hns3vf_reinit_dev(struct hns3_adapter *hns)
2109 {
2110 	struct rte_eth_dev *eth_dev = &rte_eth_devices[hns->hw.data->port_id];
2111 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2112 	struct hns3_hw *hw = &hns->hw;
2113 	int ret;
2114 
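	/*
	 * After a full reset (PF FLR) the VF's config space is reset, so
	 * bus mastering has to be re-enabled before the command queue can
	 * be used again.
	 */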
2115 	if (hw->reset.level == HNS3_VF_FULL_RESET) {
2116 		rte_intr_disable(pci_dev->intr_handle);
2117 		ret = rte_pci_set_bus_master(pci_dev, true);
2118 		if (ret < 0) {
2119 			hns3_err(hw, "failed to set pci bus master, ret = %d", ret);
2120 			return ret;
2121 		}
2122 	}
2123 
2124 	/* Firmware command initialize */
2125 	ret = hns3_cmd_init(hw);
2126 	if (ret) {
2127 		hns3_err(hw, "Failed to init cmd: %d", ret);
2128 		return ret;
2129 	}
2130 
2131 	if (hw->reset.level == HNS3_VF_FULL_RESET) {
2132 		 * UIO enables MSI-X by writing to the PCIe configuration space,
2133 		 * whereas vfio_pci enables MSI-X in rte_intr_enable.
2134 		 * vfio_pci enables msix in rte_intr_enable.
2135 		 */
2136 		if (pci_dev->kdrv == RTE_PCI_KDRV_IGB_UIO ||
2137 		    pci_dev->kdrv == RTE_PCI_KDRV_UIO_GENERIC) {
2138 			ret = hns3vf_enable_msix(pci_dev, true);
2139 			if (ret != 0) {
2140 				hns3_err(hw, "Failed to enable msix");
2141 				return ret;
2142 			}
2143 		}
2144 
2145 		rte_intr_enable(pci_dev->intr_handle);
2146 	}
2147 
2148 	ret = hns3_reset_all_tqps(hns);
2149 	if (ret) {
2150 		hns3_err(hw, "Failed to reset all queues: %d", ret);
2151 		return ret;
2152 	}
2153 
2154 	ret = hns3vf_init_hardware(hns);
2155 	if (ret) {
2156 		hns3_err(hw, "Failed to init hardware: %d", ret);
2157 		return ret;
2158 	}
2159 
2160 	return 0;
2161 }
2162 
2163 static const struct eth_dev_ops hns3vf_eth_dev_ops = {
2164 	.dev_configure      = hns3vf_dev_configure,
2165 	.dev_start          = hns3vf_dev_start,
2166 	.dev_stop           = hns3vf_dev_stop,
2167 	.dev_close          = hns3vf_dev_close,
2168 	.mtu_set            = hns3vf_dev_mtu_set,
2169 	.promiscuous_enable = hns3vf_dev_promiscuous_enable,
2170 	.promiscuous_disable = hns3vf_dev_promiscuous_disable,
2171 	.allmulticast_enable = hns3vf_dev_allmulticast_enable,
2172 	.allmulticast_disable = hns3vf_dev_allmulticast_disable,
2173 	.stats_get          = hns3_stats_get,
2174 	.stats_reset        = hns3_stats_reset,
2175 	.xstats_get         = hns3_dev_xstats_get,
2176 	.xstats_get_names   = hns3_dev_xstats_get_names,
2177 	.xstats_reset       = hns3_dev_xstats_reset,
2178 	.xstats_get_by_id   = hns3_dev_xstats_get_by_id,
2179 	.xstats_get_names_by_id = hns3_dev_xstats_get_names_by_id,
2180 	.dev_infos_get      = hns3_dev_infos_get,
2181 	.fw_version_get     = hns3_fw_version_get,
2182 	.rx_queue_setup     = hns3_rx_queue_setup,
2183 	.tx_queue_setup     = hns3_tx_queue_setup,
2184 	.rx_queue_release   = hns3_dev_rx_queue_release,
2185 	.tx_queue_release   = hns3_dev_tx_queue_release,
2186 	.rx_queue_start     = hns3_dev_rx_queue_start,
2187 	.rx_queue_stop      = hns3_dev_rx_queue_stop,
2188 	.tx_queue_start     = hns3_dev_tx_queue_start,
2189 	.tx_queue_stop      = hns3_dev_tx_queue_stop,
2190 	.rx_queue_intr_enable   = hns3_dev_rx_queue_intr_enable,
2191 	.rx_queue_intr_disable  = hns3_dev_rx_queue_intr_disable,
2192 	.rxq_info_get       = hns3_rxq_info_get,
2193 	.txq_info_get       = hns3_txq_info_get,
2194 	.rx_burst_mode_get  = hns3_rx_burst_mode_get,
2195 	.tx_burst_mode_get  = hns3_tx_burst_mode_get,
2196 	.mac_addr_add       = hns3_add_mac_addr,
2197 	.mac_addr_remove    = hns3_remove_mac_addr,
2198 	.mac_addr_set       = hns3vf_set_default_mac_addr,
2199 	.set_mc_addr_list   = hns3_set_mc_mac_addr_list,
2200 	.link_update        = hns3vf_dev_link_update,
2201 	.rss_hash_update    = hns3_dev_rss_hash_update,
2202 	.rss_hash_conf_get  = hns3_dev_rss_hash_conf_get,
2203 	.reta_update        = hns3_dev_rss_reta_update,
2204 	.reta_query         = hns3_dev_rss_reta_query,
2205 	.flow_ops_get       = hns3_dev_flow_ops_get,
2206 	.vlan_filter_set    = hns3vf_vlan_filter_set,
2207 	.vlan_offload_set   = hns3vf_vlan_offload_set,
2208 	.get_reg            = hns3_get_regs,
2209 	.dev_supported_ptypes_get = hns3_dev_supported_ptypes_get,
2210 	.tx_done_cleanup    = hns3_tx_done_cleanup,
2211 	.eth_dev_priv_dump  = hns3_eth_dev_priv_dump,
2212 	.eth_rx_descriptor_dump = hns3_rx_descriptor_dump,
2213 	.eth_tx_descriptor_dump = hns3_tx_descriptor_dump,
2214 	.get_monitor_addr       = hns3_get_monitor_addr,
2215 };
2216 
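/* Callbacks used by the common hns3 reset state machine for VF devices. */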
2217 static const struct hns3_reset_ops hns3vf_reset_ops = {
2218 	.reset_service       = hns3vf_reset_service,
2219 	.stop_service        = hns3vf_stop_service,
2220 	.prepare_reset       = hns3vf_prepare_reset,
2221 	.wait_hardware_ready = hns3vf_wait_hardware_ready,
2222 	.reinit_dev          = hns3vf_reinit_dev,
2223 	.restore_conf        = hns3vf_restore_conf,
2224 	.start_service       = hns3vf_start_service,
2225 };
2226 
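/* Register the VF specific MAC address and vector binding hooks. */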
2227 static void
2228 hns3vf_init_hw_ops(struct hns3_hw *hw)
2229 {
2230 	hw->ops.add_mc_mac_addr = hns3vf_add_mc_mac_addr;
2231 	hw->ops.del_mc_mac_addr = hns3vf_remove_mc_mac_addr;
2232 	hw->ops.add_uc_mac_addr = hns3vf_add_uc_mac_addr;
2233 	hw->ops.del_uc_mac_addr = hns3vf_remove_uc_mac_addr;
2234 	hw->ops.bind_ring_with_vector = hns3vf_bind_ring_with_vector;
2235 }
2236 
2237 static int
2238 hns3vf_dev_init(struct rte_eth_dev *eth_dev)
2239 {
2240 	struct hns3_adapter *hns = eth_dev->data->dev_private;
2241 	struct hns3_hw *hw = &hns->hw;
2242 	int ret;
2243 
2244 	PMD_INIT_FUNC_TRACE();
2245 
2246 	hns3_flow_init(eth_dev);
2247 
2248 	hns3_set_rxtx_function(eth_dev);
2249 	eth_dev->dev_ops = &hns3vf_eth_dev_ops;
2250 	eth_dev->rx_queue_count = hns3_rx_queue_count;
2251 	ret = hns3_mp_init(eth_dev);
2252 	if (ret)
2253 		goto err_mp_init;
2254 
2255 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2256 		hns3_tx_push_init(eth_dev);
2257 		return 0;
2258 	}
2259 
2260 	hw->adapter_state = HNS3_NIC_UNINITIALIZED;
2261 	hns->is_vf = true;
2262 	hw->data = eth_dev->data;
2263 	hns3_parse_devargs(eth_dev);
2264 
2265 	ret = hns3_reset_init(hw);
2266 	if (ret)
2267 		goto err_init_reset;
2268 	hw->reset.ops = &hns3vf_reset_ops;
2269 
2270 	hns3vf_init_hw_ops(hw);
2271 	ret = hns3vf_init_vf(eth_dev);
2272 	if (ret) {
2273 		PMD_INIT_LOG(ERR, "Failed to init vf: %d", ret);
2274 		goto err_init_vf;
2275 	}
2276 
2277 	ret = hns3_init_mac_addrs(eth_dev);
2278 	if (ret != 0)
2279 		goto err_init_mac_addrs;
2280 
2281 	hw->adapter_state = HNS3_NIC_INITIALIZED;
2282 
2283 	if (rte_atomic_load_explicit(&hw->reset.schedule, rte_memory_order_relaxed) ==
2284 			    SCHEDULE_PENDING) {
2285 		hns3_err(hw, "Reschedule reset service after dev_init");
2286 		hns3_schedule_reset(hns);
2287 	} else {
2288 		/* IMP waits for the ready flag before performing a reset */
2289 		hns3_notify_reset_ready(hw, false);
2290 	}
2291 	rte_eal_alarm_set(HNS3VF_KEEP_ALIVE_INTERVAL, hns3vf_keep_alive_handler,
2292 			  eth_dev);
2293 	return 0;
2294 
2295 err_init_mac_addrs:
2296 	hns3vf_uninit_vf(eth_dev);
2297 
2298 err_init_vf:
2299 	rte_free(hw->reset.wait_data);
2300 
2301 err_init_reset:
2302 	hns3_mp_uninit(eth_dev);
2303 
2304 err_mp_init:
2305 	eth_dev->dev_ops = NULL;
2306 	eth_dev->rx_pkt_burst = NULL;
2307 	eth_dev->rx_descriptor_status = NULL;
2308 	eth_dev->tx_pkt_burst = NULL;
2309 	eth_dev->tx_pkt_prepare = NULL;
2310 	eth_dev->tx_descriptor_status = NULL;
2311 
2312 	return ret;
2313 }
2314 
2315 static int
2316 hns3vf_dev_uninit(struct rte_eth_dev *eth_dev)
2317 {
2318 	struct hns3_adapter *hns = eth_dev->data->dev_private;
2319 	struct hns3_hw *hw = &hns->hw;
2320 
2321 	PMD_INIT_FUNC_TRACE();
2322 
2323 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2324 		hns3_mp_uninit(eth_dev);
2325 		return 0;
2326 	}
2327 
2328 	if (hw->adapter_state < HNS3_NIC_CLOSING)
2329 		hns3vf_dev_close(eth_dev);
2330 
2331 	hw->adapter_state = HNS3_NIC_REMOVED;
2332 	return 0;
2333 }
2334 
2335 static int
2336 eth_hns3vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2337 		     struct rte_pci_device *pci_dev)
2338 {
2339 	return rte_eth_dev_pci_generic_probe(pci_dev,
2340 					     sizeof(struct hns3_adapter),
2341 					     hns3vf_dev_init);
2342 }
2343 
2344 static int
2345 eth_hns3vf_pci_remove(struct rte_pci_device *pci_dev)
2346 {
2347 	return rte_eth_dev_pci_generic_remove(pci_dev, hns3vf_dev_uninit);
2348 }
2349 
2350 static const struct rte_pci_id pci_id_hns3vf_map[] = {
2351 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_VF) },
2352 	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_PFC_VF) },
2353 	{ .vendor_id = 0, }, /* sentinel */
2354 };
2355 
2356 static struct rte_pci_driver rte_hns3vf_pmd = {
2357 	.id_table = pci_id_hns3vf_map,
2358 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2359 	.probe = eth_hns3vf_pci_probe,
2360 	.remove = eth_hns3vf_pci_remove,
2361 };
2362 
2363 RTE_PMD_REGISTER_PCI(net_hns3_vf, rte_hns3vf_pmd);
2364 RTE_PMD_REGISTER_PCI_TABLE(net_hns3_vf, pci_id_hns3vf_map);
2365 RTE_PMD_REGISTER_KMOD_DEP(net_hns3_vf, "* igb_uio | vfio-pci");
2366 RTE_PMD_REGISTER_PARAM_STRING(net_hns3_vf,
2367 		HNS3_DEVARG_RX_FUNC_HINT "=vec|sve|simple|common "
2368 		HNS3_DEVARG_TX_FUNC_HINT "=vec|sve|simple|common "
2369 		HNS3_DEVARG_DEV_CAPS_MASK "=<1-65535> "
2370 		HNS3_DEVARG_MBX_TIME_LIMIT_MS "=<uint16_t> ");
2371