1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Huawei Technologies Co., Ltd
3  */
4 
5 #include <rte_pci.h>
6 #include <rte_bus_pci.h>
7 #include <rte_ethdev_pci.h>
8 #include <rte_mbuf.h>
9 #include <rte_malloc.h>
10 #include <rte_memcpy.h>
11 #include <rte_mempool.h>
12 #include <rte_errno.h>
13 #include <rte_ether.h>
14 
15 #include "base/hinic_compat.h"
16 #include "base/hinic_pmd_hwdev.h"
17 #include "base/hinic_pmd_hwif.h"
18 #include "base/hinic_pmd_wq.h"
19 #include "base/hinic_pmd_cfg.h"
20 #include "base/hinic_pmd_mgmt.h"
21 #include "base/hinic_pmd_cmdq.h"
22 #include "base/hinic_pmd_niccfg.h"
23 #include "base/hinic_pmd_nicio.h"
24 #include "base/hinic_pmd_mbox.h"
25 #include "hinic_pmd_ethdev.h"
26 #include "hinic_pmd_tx.h"
27 #include "hinic_pmd_rx.h"
28 
29 /* Vendor ID used by Huawei devices */
30 #define HINIC_HUAWEI_VENDOR_ID		0x19E5
31 
32 /* Hinic devices */
33 #define HINIC_DEV_ID_PRD		0x1822
34 #define HINIC_DEV_ID_VF			0x375E
35 #define HINIC_DEV_ID_VF_HV		0x379E
36 
37 /* Mezz card for Blade Server */
38 #define HINIC_DEV_ID_MEZZ_25GE		0x0210
39 #define HINIC_DEV_ID_MEZZ_100GE		0x0205
40 
41 /* 2*25G and 2*100G card */
42 #define HINIC_DEV_ID_1822_DUAL_25GE	0x0206
43 #define HINIC_DEV_ID_1822_100GE		0x0200
44 
45 #define HINIC_SERVICE_MODE_NIC		2
46 
47 #define HINIC_INTR_CB_UNREG_MAX_RETRIES	10
48 
49 #define DEFAULT_BASE_COS		4
50 #define NR_MAX_COS			8
51 
52 #define HINIC_MIN_RX_BUF_SIZE		1024
53 #define HINIC_MAX_UC_MAC_ADDRS		128
54 #define HINIC_MAX_MC_MAC_ADDRS		2048
55 
56 #define HINIC_DEFAULT_BURST_SIZE	32
57 #define HINIC_DEFAULT_NB_QUEUES		1
58 #define HINIC_DEFAULT_RING_SIZE		1024
59 #define HINIC_MAX_LRO_SIZE		65536
60 
61 /*
62  * vlan_id is a 12-bit number.
63  * The VFTA array is a 4096-bit array built from 128 32-bit elements.
64  * The lower 5 bits of vlan_id select the bit within a 32-bit element,
65  * and the upper 7 bits select the VFTA array index.
66  */
67 #define HINIC_VFTA_BIT(vlan_id)    (1 << ((vlan_id) & 0x1F))
68 #define HINIC_VFTA_IDX(vlan_id)    ((vlan_id) >> 5)
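/*
 * Example (illustrative): vlan_id 100 gives HINIC_VFTA_IDX(100) = 3 and
 * HINIC_VFTA_BIT(100) = (1 << 4), i.e. bit 4 of vfta[3].
 */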
69 
70 #define HINIC_VLAN_FILTER_EN		(1U << 0)
71 
72 #define HINIC_MTU_TO_PKTLEN(mtu)	\
73 	((mtu) + ETH_HLEN + ETH_CRC_LEN)
74 
75 #define HINIC_PKTLEN_TO_MTU(pktlen)	\
76 	((pktlen) - (ETH_HLEN + ETH_CRC_LEN))
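/* e.g. an MTU of 1500 maps to a packet length of 1518 (1500 + 14 + 4) */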
77 
78 /* LRO WQE number limit for one packet */
79 #define HINIC_LRO_WQE_NUM_DEFAULT	8
80 
81 struct hinic_xstats_name_off {
82 	char name[RTE_ETH_XSTATS_NAME_SIZE];
83 	u32  offset;
84 };
85 
86 #define HINIC_FUNC_STAT(_stat_item) {	\
87 	.name = #_stat_item, \
88 	.offset = offsetof(struct hinic_vport_stats, _stat_item) \
89 }
90 
91 #define HINIC_PORT_STAT(_stat_item) { \
92 	.name = #_stat_item, \
93 	.offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
94 }
95 
96 static const struct hinic_xstats_name_off hinic_vport_stats_strings[] = {
97 	HINIC_FUNC_STAT(tx_unicast_pkts_vport),
98 	HINIC_FUNC_STAT(tx_unicast_bytes_vport),
99 	HINIC_FUNC_STAT(tx_multicast_pkts_vport),
100 	HINIC_FUNC_STAT(tx_multicast_bytes_vport),
101 	HINIC_FUNC_STAT(tx_broadcast_pkts_vport),
102 	HINIC_FUNC_STAT(tx_broadcast_bytes_vport),
103 
104 	HINIC_FUNC_STAT(rx_unicast_pkts_vport),
105 	HINIC_FUNC_STAT(rx_unicast_bytes_vport),
106 	HINIC_FUNC_STAT(rx_multicast_pkts_vport),
107 	HINIC_FUNC_STAT(rx_multicast_bytes_vport),
108 	HINIC_FUNC_STAT(rx_broadcast_pkts_vport),
109 	HINIC_FUNC_STAT(rx_broadcast_bytes_vport),
110 
111 	HINIC_FUNC_STAT(tx_discard_vport),
112 	HINIC_FUNC_STAT(rx_discard_vport),
113 	HINIC_FUNC_STAT(tx_err_vport),
114 	HINIC_FUNC_STAT(rx_err_vport),
115 };
116 
117 #define HINIC_VPORT_XSTATS_NUM (sizeof(hinic_vport_stats_strings) / \
118 		sizeof(hinic_vport_stats_strings[0]))
119 
120 static const struct hinic_xstats_name_off hinic_phyport_stats_strings[] = {
121 	HINIC_PORT_STAT(mac_rx_total_pkt_num),
122 	HINIC_PORT_STAT(mac_rx_total_oct_num),
123 	HINIC_PORT_STAT(mac_rx_bad_pkt_num),
124 	HINIC_PORT_STAT(mac_rx_bad_oct_num),
125 	HINIC_PORT_STAT(mac_rx_good_pkt_num),
126 	HINIC_PORT_STAT(mac_rx_good_oct_num),
127 	HINIC_PORT_STAT(mac_rx_uni_pkt_num),
128 	HINIC_PORT_STAT(mac_rx_multi_pkt_num),
129 	HINIC_PORT_STAT(mac_rx_broad_pkt_num),
130 	HINIC_PORT_STAT(mac_tx_total_pkt_num),
131 	HINIC_PORT_STAT(mac_tx_total_oct_num),
132 	HINIC_PORT_STAT(mac_tx_bad_pkt_num),
133 	HINIC_PORT_STAT(mac_tx_bad_oct_num),
134 	HINIC_PORT_STAT(mac_tx_good_pkt_num),
135 	HINIC_PORT_STAT(mac_tx_good_oct_num),
136 	HINIC_PORT_STAT(mac_tx_uni_pkt_num),
137 	HINIC_PORT_STAT(mac_tx_multi_pkt_num),
138 	HINIC_PORT_STAT(mac_tx_broad_pkt_num),
139 	HINIC_PORT_STAT(mac_rx_fragment_pkt_num),
140 	HINIC_PORT_STAT(mac_rx_undersize_pkt_num),
141 	HINIC_PORT_STAT(mac_rx_undermin_pkt_num),
142 	HINIC_PORT_STAT(mac_rx_64_oct_pkt_num),
143 	HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num),
144 	HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num),
145 	HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num),
146 	HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num),
147 	HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num),
148 	HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num),
149 	HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num),
150 	HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num),
151 	HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num),
152 	HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num),
153 	HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num),
154 	HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num),
155 	HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num),
156 	HINIC_PORT_STAT(mac_rx_oversize_pkt_num),
157 	HINIC_PORT_STAT(mac_rx_jabber_pkt_num),
158 	HINIC_PORT_STAT(mac_rx_mac_pause_num),
159 	HINIC_PORT_STAT(mac_rx_pfc_pkt_num),
160 	HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num),
161 	HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num),
162 	HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num),
163 	HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num),
164 	HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num),
165 	HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num),
166 	HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num),
167 	HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num),
168 	HINIC_PORT_STAT(mac_rx_mac_control_pkt_num),
169 	HINIC_PORT_STAT(mac_rx_sym_err_pkt_num),
170 	HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num),
171 	HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num),
172 	HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num),
173 	HINIC_PORT_STAT(mac_tx_fragment_pkt_num),
174 	HINIC_PORT_STAT(mac_tx_undersize_pkt_num),
175 	HINIC_PORT_STAT(mac_tx_undermin_pkt_num),
176 	HINIC_PORT_STAT(mac_tx_64_oct_pkt_num),
177 	HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num),
178 	HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num),
179 	HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num),
180 	HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num),
181 	HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num),
182 	HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num),
183 	HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num),
184 	HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num),
185 	HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num),
186 	HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num),
187 	HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num),
188 	HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num),
189 	HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num),
190 	HINIC_PORT_STAT(mac_tx_oversize_pkt_num),
191 	HINIC_PORT_STAT(mac_trans_jabber_pkt_num),
192 	HINIC_PORT_STAT(mac_tx_mac_pause_num),
193 	HINIC_PORT_STAT(mac_tx_pfc_pkt_num),
194 	HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num),
195 	HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num),
196 	HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num),
197 	HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num),
198 	HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num),
199 	HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num),
200 	HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num),
201 	HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num),
202 	HINIC_PORT_STAT(mac_tx_mac_control_pkt_num),
203 	HINIC_PORT_STAT(mac_tx_err_all_pkt_num),
204 	HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num),
205 	HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num),
206 };
207 
208 #define HINIC_PHYPORT_XSTATS_NUM (sizeof(hinic_phyport_stats_strings) / \
209 		sizeof(hinic_phyport_stats_strings[0]))
210 
211 static const struct hinic_xstats_name_off hinic_rxq_stats_strings[] = {
212 	{"rx_nombuf", offsetof(struct hinic_rxq_stats, rx_nombuf)},
213 	{"burst_pkt", offsetof(struct hinic_rxq_stats, burst_pkts)},
214 };
215 
216 #define HINIC_RXQ_XSTATS_NUM (sizeof(hinic_rxq_stats_strings) / \
217 		sizeof(hinic_rxq_stats_strings[0]))
218 
219 static const struct hinic_xstats_name_off hinic_txq_stats_strings[] = {
220 	{"tx_busy", offsetof(struct hinic_txq_stats, tx_busy)},
221 	{"offload_errors", offsetof(struct hinic_txq_stats, off_errs)},
222 	{"copy_pkts", offsetof(struct hinic_txq_stats, cpy_pkts)},
223 	{"rl_drop", offsetof(struct hinic_txq_stats, rl_drop)},
224 	{"burst_pkts", offsetof(struct hinic_txq_stats, burst_pkts)},
225 	{"sge_len0", offsetof(struct hinic_txq_stats, sge_len0)},
226 	{"mbuf_null", offsetof(struct hinic_txq_stats, mbuf_null)},
227 };
228 
229 #define HINIC_TXQ_XSTATS_NUM (sizeof(hinic_txq_stats_strings) / \
230 		sizeof(hinic_txq_stats_strings[0]))
231 
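/* Number of xstats entries exposed; VF functions do not report phy port stats. */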
232 static int hinic_xstats_calc_num(struct hinic_nic_dev *nic_dev)
233 {
234 	if (HINIC_IS_VF(nic_dev->hwdev)) {
235 		return (HINIC_VPORT_XSTATS_NUM +
236 			HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq +
237 			HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq);
238 	} else {
239 		return (HINIC_VPORT_XSTATS_NUM +
240 			HINIC_PHYPORT_XSTATS_NUM +
241 			HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq +
242 			HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq);
243 	}
244 }
245 
246 static const struct rte_eth_desc_lim hinic_rx_desc_lim = {
247 	.nb_max = HINIC_MAX_QUEUE_DEPTH,
248 	.nb_min = HINIC_MIN_QUEUE_DEPTH,
249 	.nb_align = HINIC_RXD_ALIGN,
250 };
251 
252 static const struct rte_eth_desc_lim hinic_tx_desc_lim = {
253 	.nb_max = HINIC_MAX_QUEUE_DEPTH,
254 	.nb_min = HINIC_MIN_QUEUE_DEPTH,
255 	.nb_align = HINIC_TXD_ALIGN,
256 };
257 
258 static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask);
259 
260 /**
261  * Interrupt handler triggered by the NIC for handling
262  * specific events.
263  *
264  * @param param The address of the parameter (struct rte_eth_dev *) registered before.
265  */
266 static void hinic_dev_interrupt_handler(void *param)
267 {
268 	struct rte_eth_dev *dev = param;
269 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
270 
271 	if (!rte_bit_relaxed_get32(HINIC_DEV_INTR_EN, &nic_dev->dev_status)) {
272 		PMD_DRV_LOG(WARNING, "Device's interrupt is disabled, ignore interrupt event, dev_name: %s, port_id: %d",
273 			    nic_dev->proc_dev_name, dev->data->port_id);
274 		return;
275 	}
276 
277 	/* aeq0 msg handler */
278 	hinic_dev_handle_aeq_event(nic_dev->hwdev, param);
279 }
280 
281 /**
282  * Ethernet device configuration.
283  *
284  * Prepare the driver for a given number of TX and RX queues and MTU size,
285  * and configure RSS.
286  *
287  * @param dev
288  *   Pointer to Ethernet device structure.
289  *
290  * @return
291  *   0 on success, negative error value otherwise.
292  */
293 static int hinic_dev_configure(struct rte_eth_dev *dev)
294 {
295 	struct hinic_nic_dev *nic_dev;
296 	struct hinic_nic_io *nic_io;
297 	int err;
298 
299 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
300 	nic_io = nic_dev->hwdev->nic_io;
301 
302 	nic_dev->num_sq =  dev->data->nb_tx_queues;
303 	nic_dev->num_rq = dev->data->nb_rx_queues;
304 
305 	nic_io->num_sqs =  dev->data->nb_tx_queues;
306 	nic_io->num_rqs = dev->data->nb_rx_queues;
307 
308 	/* number of queue pairs is the max of num_sq and num_rq */
309 	nic_dev->num_qps = (nic_dev->num_sq > nic_dev->num_rq) ?
310 			nic_dev->num_sq : nic_dev->num_rq;
311 	nic_io->num_qps = nic_dev->num_qps;
312 
313 	if (nic_dev->num_qps > nic_io->max_qps) {
314 		PMD_DRV_LOG(ERR,
315 			"Queue number out of range, get queue_num:%d, max_queue_num:%d",
316 			nic_dev->num_qps, nic_io->max_qps);
317 		return -EINVAL;
318 	}
319 
320 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
321 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
322 
323 	/* max_rx_pkt_len must be within 256~9600 bytes */
324 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len < HINIC_MIN_FRAME_SIZE ||
325 	    dev->data->dev_conf.rxmode.max_rx_pkt_len >
326 	    HINIC_MAX_JUMBO_FRAME_SIZE) {
327 		PMD_DRV_LOG(ERR,
328 			"Max rx pkt len out of range, get max_rx_pkt_len:%d, "
329 			"expect between %d and %d",
330 			dev->data->dev_conf.rxmode.max_rx_pkt_len,
331 			HINIC_MIN_FRAME_SIZE, HINIC_MAX_JUMBO_FRAME_SIZE);
332 		return -EINVAL;
333 	}
334 
335 	nic_dev->mtu_size =
336 		HINIC_PKTLEN_TO_MTU(dev->data->dev_conf.rxmode.max_rx_pkt_len);
337 
338 	/* rss template */
339 	err = hinic_config_mq_mode(dev, TRUE);
340 	if (err) {
341 		PMD_DRV_LOG(ERR, "Config multi-queue failed");
342 		return err;
343 	}
344 
345 	/* init vlan offload */
346 	err = hinic_vlan_offload_set(dev,
347 				ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
348 	if (err) {
349 		PMD_DRV_LOG(ERR, "Initialize vlan filter and strip failed");
350 		(void)hinic_config_mq_mode(dev, FALSE);
351 		return err;
352 	}
353 
354 	/* clear fdir filter flag in function table */
355 	hinic_free_fdir_filter(nic_dev);
356 
357 	return HINIC_OK;
358 }
359 
360 /**
361  * DPDK callback to create the receive queue.
362  *
363  * @param dev
364  *   Pointer to Ethernet device structure.
365  * @param queue_idx
366  *   RX queue index.
367  * @param nb_desc
368  *   Number of descriptors for receive queue.
369  * @param socket_id
370  *   NUMA socket on which memory must be allocated.
371  * @param rx_conf
372  *   Thresholds parameters (only rx_free_thresh is used).
373  * @param mp
374  *   Memory pool for buffer allocations.
375  *
376  * @return
377  *   0 on success, negative error value otherwise.
378  */
379 static int hinic_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
380 			 uint16_t nb_desc, unsigned int socket_id,
381 			 __rte_unused const struct rte_eth_rxconf *rx_conf,
382 			 struct rte_mempool *mp)
383 {
384 	int rc;
385 	struct hinic_nic_dev *nic_dev;
386 	struct hinic_hwdev *hwdev;
387 	struct hinic_rxq *rxq;
388 	u16 rq_depth, rx_free_thresh;
389 	u32 buf_size;
390 
391 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
392 	hwdev = nic_dev->hwdev;
393 
394 	/* queue depth must be a power of 2, otherwise it is rounded up */
395 	rq_depth = (nb_desc & (nb_desc - 1)) ?
396 		((u16)(1U << (ilog2(nb_desc) + 1))) : nb_desc;
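	/* e.g. nb_desc = 1000 is rounded up to rq_depth = 1024 */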
397 
398 	/*
399 	 * Validate number of receive descriptors.
400 	 * It must not exceed hardware maximum and minimum.
401 	 */
402 	if (rq_depth > HINIC_MAX_QUEUE_DEPTH ||
403 		rq_depth < HINIC_MIN_QUEUE_DEPTH) {
404 		PMD_DRV_LOG(ERR, "RX queue depth is out of range from %d to %d, (nb_desc=%d, q_depth=%d, port=%d queue=%d)",
405 			    HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH,
406 			    (int)nb_desc, (int)rq_depth,
407 			    (int)dev->data->port_id, (int)queue_idx);
408 		return -EINVAL;
409 	}
410 
411 	/*
412 	 * The RX descriptor ring will be cleaned after rxq->rx_free_thresh
413 	 * descriptors are used or if the number of descriptors required
414 	 * to receive a packet is greater than the number of free RX
415 	 * descriptors.
416 	 * The following constraints must be satisfied:
417 	 *  rx_free_thresh must be greater than 0.
418 	 *  rx_free_thresh must be less than the size of the ring minus 1.
419 	 * When set to zero use default values.
420 	 */
421 	rx_free_thresh = (u16)((rx_conf->rx_free_thresh) ?
422 			rx_conf->rx_free_thresh : HINIC_DEFAULT_RX_FREE_THRESH);
423 	if (rx_free_thresh >= (rq_depth - 1)) {
424 		PMD_DRV_LOG(ERR, "rx_free_thresh must be less than the number of RX descriptors minus 1. (rx_free_thresh=%u port=%d queue=%d)",
425 			    (unsigned int)rx_free_thresh,
426 			    (int)dev->data->port_id,
427 			    (int)queue_idx);
428 		return -EINVAL;
429 	}
430 
431 	rxq = rte_zmalloc_socket("hinic_rx_queue", sizeof(struct hinic_rxq),
432 				 RTE_CACHE_LINE_SIZE, socket_id);
433 	if (!rxq) {
434 		PMD_DRV_LOG(ERR, "Allocate rxq[%d] failed, dev_name: %s",
435 			    queue_idx, dev->data->name);
436 		return -ENOMEM;
437 	}
438 	nic_dev->rxqs[queue_idx] = rxq;
439 
440 	/* alloc rx rq hw wqe pages */
441 	rc = hinic_create_rq(hwdev, queue_idx, rq_depth, socket_id);
442 	if (rc) {
443 		PMD_DRV_LOG(ERR, "Create rxq[%d] failed, dev_name: %s, rq_depth: %d",
444 			    queue_idx, dev->data->name, rq_depth);
445 		goto create_rq_fail;
446 	}
447 
448 	/* mbuf pool must be assigned before setting up rx resources */
449 	rxq->mb_pool = mp;
450 
451 	rc =
452 	hinic_convert_rx_buf_size(rte_pktmbuf_data_room_size(rxq->mb_pool) -
453 				  RTE_PKTMBUF_HEADROOM, &buf_size);
454 	if (rc) {
455 		PMD_DRV_LOG(ERR, "Adjust buf size failed, dev_name: %s",
456 			    dev->data->name);
457 		goto adjust_bufsize_fail;
458 	}
459 
460 	/* rx queue info, rearm control */
461 	rxq->wq = &hwdev->nic_io->rq_wq[queue_idx];
462 	rxq->pi_virt_addr = hwdev->nic_io->qps[queue_idx].rq.pi_virt_addr;
463 	rxq->nic_dev = nic_dev;
464 	rxq->q_id = queue_idx;
465 	rxq->q_depth = rq_depth;
466 	rxq->buf_len = (u16)buf_size;
467 	rxq->rx_free_thresh = rx_free_thresh;
468 	rxq->socket_id = socket_id;
469 
470 	/* the tail of the ring cannot do mbuf rearm in bulk */
471 	rxq->rxinfo_align_end = rxq->q_depth - rxq->rx_free_thresh;
472 
473 	/* device port identifier */
474 	rxq->port_id = dev->data->port_id;
475 
476 	/* alloc rx_cqe and prepare rq_wqe */
477 	rc = hinic_setup_rx_resources(rxq);
478 	if (rc) {
479 		PMD_DRV_LOG(ERR, "Setup rxq[%d] rx_resources failed, dev_name: %s",
480 			    queue_idx, dev->data->name);
481 		goto setup_rx_res_err;
482 	}
483 
484 	/* record nic_dev rxq in rte_eth rx_queues */
485 	dev->data->rx_queues[queue_idx] = rxq;
486 
487 	return 0;
488 
489 setup_rx_res_err:
490 adjust_bufsize_fail:
491 	hinic_destroy_rq(hwdev, queue_idx);
492 
493 create_rq_fail:
494 	rte_free(rxq);
495 
496 	return rc;
497 }
498 
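/* Reset each RQ's work queue indexes and refill the ring with mbufs before start. */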
499 static void hinic_reset_rx_queue(struct rte_eth_dev *dev)
500 {
501 	struct hinic_rxq *rxq;
502 	struct hinic_nic_dev *nic_dev;
503 	int q_id = 0;
504 
505 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
506 
507 	for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
508 		rxq = dev->data->rx_queues[q_id];
509 
510 		rxq->wq->cons_idx = 0;
511 		rxq->wq->prod_idx = 0;
512 		rxq->wq->delta = rxq->q_depth;
513 		rxq->wq->mask = rxq->q_depth - 1;
514 
515 		/* alloc mbuf to rq */
516 		hinic_rx_alloc_pkts(rxq);
517 	}
518 }
519 
520 /**
521  * DPDK callback to configure the transmit queue.
522  *
523  * @param dev
524  *   Pointer to Ethernet device structure.
525  * @param queue_idx
526  *   Transmit queue index.
527  * @param nb_desc
528  *   Number of descriptors for transmit queue.
529  * @param socket_id
530  *   NUMA socket on which memory must be allocated.
531  * @param tx_conf
532  *   Tx queue configuration parameters.
533  *
534  * @return
535  *   0 on success, negative error value otherwise.
536  */
537 static int hinic_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
538 			 uint16_t nb_desc, unsigned int socket_id,
539 			 __rte_unused const struct rte_eth_txconf *tx_conf)
540 {
541 	int rc;
542 	struct hinic_nic_dev *nic_dev;
543 	struct hinic_hwdev *hwdev;
544 	struct hinic_txq *txq;
545 	u16 sq_depth, tx_free_thresh;
546 
547 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
548 	hwdev = nic_dev->hwdev;
549 
550 	/* queue depth must be a power of 2, otherwise it is rounded up */
551 	sq_depth = (nb_desc & (nb_desc - 1)) ?
552 			((u16)(1U << (ilog2(nb_desc) + 1))) : nb_desc;
553 
554 	/*
555 	 * Validate number of transmit descriptors.
556 	 * It must not exceed hardware maximum and minimum.
557 	 */
558 	if (sq_depth > HINIC_MAX_QUEUE_DEPTH ||
559 		sq_depth < HINIC_MIN_QUEUE_DEPTH) {
560 		PMD_DRV_LOG(ERR, "TX queue depth is out of range from %d to %d, (nb_desc=%d, q_depth=%d, port=%d queue=%d)",
561 			  HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH,
562 			  (int)nb_desc, (int)sq_depth,
563 			  (int)dev->data->port_id, (int)queue_idx);
564 		return -EINVAL;
565 	}
566 
567 	/*
568 	 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
569 	 * descriptors are used or if the number of descriptors required
570 	 * to transmit a packet is greater than the number of free TX
571 	 * descriptors.
572 	 * The following constraints must be satisfied:
573 	 *  tx_free_thresh must be greater than 0.
574 	 *  tx_free_thresh must be less than the size of the ring minus 1.
575 	 * When set to zero use default values.
576 	 */
577 	tx_free_thresh = (u16)((tx_conf->tx_free_thresh) ?
578 			tx_conf->tx_free_thresh : HINIC_DEFAULT_TX_FREE_THRESH);
579 	if (tx_free_thresh >= (sq_depth - 1)) {
580 		PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the number of TX descriptors minus 1. (tx_free_thresh=%u port=%d queue=%d)",
581 			(unsigned int)tx_free_thresh, (int)dev->data->port_id,
582 			(int)queue_idx);
583 		return -EINVAL;
584 	}
585 
586 	txq = rte_zmalloc_socket("hinic_tx_queue", sizeof(struct hinic_txq),
587 				 RTE_CACHE_LINE_SIZE, socket_id);
588 	if (!txq) {
589 		PMD_DRV_LOG(ERR, "Allocate txq[%d] failed, dev_name: %s",
590 			    queue_idx, dev->data->name);
591 		return -ENOMEM;
592 	}
593 	nic_dev->txqs[queue_idx] = txq;
594 
595 	/* alloc tx sq hw wqe pages */
596 	rc = hinic_create_sq(hwdev, queue_idx, sq_depth, socket_id);
597 	if (rc) {
598 		PMD_DRV_LOG(ERR, "Create txq[%d] failed, dev_name: %s, sq_depth: %d",
599 			    queue_idx, dev->data->name, sq_depth);
600 		goto create_sq_fail;
601 	}
602 
603 	txq->q_id = queue_idx;
604 	txq->q_depth = sq_depth;
605 	txq->port_id = dev->data->port_id;
606 	txq->tx_free_thresh = tx_free_thresh;
607 	txq->nic_dev = nic_dev;
608 	txq->wq = &hwdev->nic_io->sq_wq[queue_idx];
609 	txq->sq = &hwdev->nic_io->qps[queue_idx].sq;
610 	txq->cons_idx_addr = hwdev->nic_io->qps[queue_idx].sq.cons_idx_addr;
611 	txq->sq_head_addr = HINIC_GET_WQ_HEAD(txq);
612 	txq->sq_bot_sge_addr = HINIC_GET_WQ_TAIL(txq) -
613 					sizeof(struct hinic_sq_bufdesc);
614 	txq->cos = nic_dev->default_cos;
615 	txq->socket_id = socket_id;
616 
617 	/* alloc software txinfo */
618 	rc = hinic_setup_tx_resources(txq);
619 	if (rc) {
620 		PMD_DRV_LOG(ERR, "Setup txq[%d] tx_resources failed, dev_name: %s",
621 			    queue_idx, dev->data->name);
622 		goto setup_tx_res_fail;
623 	}
624 
625 	/* record nic_dev txq in rte_eth tx_queues */
626 	dev->data->tx_queues[queue_idx] = txq;
627 
628 	return HINIC_OK;
629 
630 setup_tx_res_fail:
631 	hinic_destroy_sq(hwdev, queue_idx);
632 
633 create_sq_fail:
634 	rte_free(txq);
635 
636 	return rc;
637 }
638 
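/* Reset each SQ's work queue indexes and clear the hardware consumer index. */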
639 static void hinic_reset_tx_queue(struct rte_eth_dev *dev)
640 {
641 	struct hinic_nic_dev *nic_dev;
642 	struct hinic_txq *txq;
643 	struct hinic_nic_io *nic_io;
644 	struct hinic_hwdev *hwdev;
645 	volatile u32 *ci_addr;
646 	int q_id = 0;
647 
648 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
649 	hwdev = nic_dev->hwdev;
650 	nic_io = hwdev->nic_io;
651 
652 	for (q_id = 0; q_id < nic_dev->num_sq; q_id++) {
653 		txq = dev->data->tx_queues[q_id];
654 
655 		txq->wq->cons_idx = 0;
656 		txq->wq->prod_idx = 0;
657 		txq->wq->delta = txq->q_depth;
658 		txq->wq->mask  = txq->q_depth - 1;
659 
660 		/* clear hardware ci */
661 		ci_addr = (volatile u32 *)HINIC_CI_VADDR(nic_io->ci_vaddr_base,
662 							q_id);
663 		*ci_addr = 0;
664 	}
665 }
666 
667 /**
668  * Get link speed from NIC.
669  *
670  * @param dev
671  *   Pointer to Ethernet device structure.
672  * @param speed_capa
673  *   Pointer to the link speed capability bitmap (output).
674  */
675 static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa)
676 {
677 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
678 	u32 supported_link, advertised_link;
679 	int err;
680 
681 #define HINIC_LINK_MODE_SUPPORT_1G	(1U << HINIC_GE_BASE_KX)
682 
683 #define HINIC_LINK_MODE_SUPPORT_10G	(1U << HINIC_10GE_BASE_KR)
684 
685 #define HINIC_LINK_MODE_SUPPORT_25G	((1U << HINIC_25GE_BASE_KR_S) | \
686 					(1U << HINIC_25GE_BASE_CR_S) | \
687 					(1U << HINIC_25GE_BASE_KR) | \
688 					(1U << HINIC_25GE_BASE_CR))
689 
690 #define HINIC_LINK_MODE_SUPPORT_40G	((1U << HINIC_40GE_BASE_KR4) | \
691 					(1U << HINIC_40GE_BASE_CR4))
692 
693 #define HINIC_LINK_MODE_SUPPORT_100G	((1U << HINIC_100GE_BASE_KR4) | \
694 					(1U << HINIC_100GE_BASE_CR4))
695 
696 	err = hinic_get_link_mode(nic_dev->hwdev,
697 				  &supported_link, &advertised_link);
698 	if (err || supported_link == HINIC_SUPPORTED_UNKNOWN ||
699 	    advertised_link == HINIC_SUPPORTED_UNKNOWN) {
700 		PMD_DRV_LOG(WARNING, "Get speed capability info failed, device: %s, port_id: %u",
701 			  nic_dev->proc_dev_name, dev->data->port_id);
702 	} else {
703 		*speed_capa = 0;
704 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_1G))
705 			*speed_capa |= ETH_LINK_SPEED_1G;
706 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_10G))
707 			*speed_capa |= ETH_LINK_SPEED_10G;
708 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_25G))
709 			*speed_capa |= ETH_LINK_SPEED_25G;
710 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_40G))
711 			*speed_capa |= ETH_LINK_SPEED_40G;
712 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_100G))
713 			*speed_capa |= ETH_LINK_SPEED_100G;
714 	}
715 }
716 
717 /**
718  * DPDK callback to get information about the device.
719  *
720  * @param dev
721  *   Pointer to Ethernet device structure.
722  * @param info
723  *   Pointer to Info structure output buffer.
724  */
725 static int
726 hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
727 {
728 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
729 
730 	info->max_rx_queues  = nic_dev->nic_cap.max_rqs;
731 	info->max_tx_queues  = nic_dev->nic_cap.max_sqs;
732 	info->min_rx_bufsize = HINIC_MIN_RX_BUF_SIZE;
733 	info->max_rx_pktlen  = HINIC_MAX_JUMBO_FRAME_SIZE;
734 	info->max_mac_addrs  = HINIC_MAX_UC_MAC_ADDRS;
735 	info->min_mtu = HINIC_MIN_MTU_SIZE;
736 	info->max_mtu = HINIC_MAX_MTU_SIZE;
737 	info->max_lro_pkt_size = HINIC_MAX_LRO_SIZE;
738 
739 	hinic_get_speed_capa(dev, &info->speed_capa);
740 	info->rx_queue_offload_capa = 0;
741 	info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
742 				DEV_RX_OFFLOAD_IPV4_CKSUM |
743 				DEV_RX_OFFLOAD_UDP_CKSUM |
744 				DEV_RX_OFFLOAD_TCP_CKSUM |
745 				DEV_RX_OFFLOAD_VLAN_FILTER |
746 				DEV_RX_OFFLOAD_SCATTER |
747 				DEV_RX_OFFLOAD_JUMBO_FRAME |
748 				DEV_RX_OFFLOAD_TCP_LRO |
749 				DEV_RX_OFFLOAD_RSS_HASH;
750 
751 	info->tx_queue_offload_capa = 0;
752 	info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
753 				DEV_TX_OFFLOAD_IPV4_CKSUM |
754 				DEV_TX_OFFLOAD_UDP_CKSUM |
755 				DEV_TX_OFFLOAD_TCP_CKSUM |
756 				DEV_TX_OFFLOAD_SCTP_CKSUM |
757 				DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
758 				DEV_TX_OFFLOAD_TCP_TSO |
759 				DEV_TX_OFFLOAD_MULTI_SEGS;
760 
761 	info->hash_key_size = HINIC_RSS_KEY_SIZE;
762 	info->reta_size = HINIC_RSS_INDIR_SIZE;
763 	info->flow_type_rss_offloads = HINIC_RSS_OFFLOAD_ALL;
764 	info->rx_desc_lim = hinic_rx_desc_lim;
765 	info->tx_desc_lim = hinic_tx_desc_lim;
766 
767 	/* Driver-preferred Rx/Tx parameters */
768 	info->default_rxportconf.burst_size = HINIC_DEFAULT_BURST_SIZE;
769 	info->default_txportconf.burst_size = HINIC_DEFAULT_BURST_SIZE;
770 	info->default_rxportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES;
771 	info->default_txportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES;
772 	info->default_rxportconf.ring_size = HINIC_DEFAULT_RING_SIZE;
773 	info->default_txportconf.ring_size = HINIC_DEFAULT_RING_SIZE;
774 
775 	return 0;
776 }
777 
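/*
 * Query the management firmware version string; if fw_size is too small,
 * return the number of bytes required (including the terminating '\0').
 */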
778 static int hinic_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
779 				size_t fw_size)
780 {
781 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
782 	char fw_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0};
783 	int err;
784 
785 	err = hinic_get_mgmt_version(nic_dev->hwdev, fw_ver);
786 	if (err) {
787 		PMD_DRV_LOG(ERR, "Failed to get fw version");
788 		return -EINVAL;
789 	}
790 
791 	if (fw_size < strlen(fw_ver) + 1)
792 		return (strlen(fw_ver) + 1);
793 
794 	snprintf(fw_version, fw_size, "%s", fw_ver);
795 
796 	return 0;
797 }
798 
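/* Program the rx mode bitmap into hardware and cache it in rx_mode_status. */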
799 static int hinic_config_rx_mode(struct hinic_nic_dev *nic_dev, u32 rx_mode_ctrl)
800 {
801 	int err;
802 
803 	err = hinic_set_rx_mode(nic_dev->hwdev, rx_mode_ctrl);
804 	if (err) {
805 		PMD_DRV_LOG(ERR, "Failed to set rx mode");
806 		return -EINVAL;
807 	}
808 	nic_dev->rx_mode_status = rx_mode_ctrl;
809 
810 	return 0;
811 }
812 
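/* Configure RSS (when enabled) and the default rx mode; roll back RSS on failure. */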
813 static int hinic_rxtx_configure(struct rte_eth_dev *dev)
814 {
815 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
816 	int err;
817 
818 	/* rx configuration: if RSS is enabled, init the default RSS configuration */
819 	err = hinic_rx_configure(dev);
820 	if (err) {
821 		PMD_DRV_LOG(ERR, "Configure rss failed");
822 		return err;
823 	}
824 
825 	/* rx mode init */
826 	err = hinic_config_rx_mode(nic_dev, HINIC_DEFAULT_RX_MODE);
827 	if (err) {
828 		PMD_DRV_LOG(ERR, "Configure rx_mode:0x%x failed",
829 			HINIC_DEFAULT_RX_MODE);
830 		goto set_rx_mode_fail;
831 	}
832 
833 	return HINIC_OK;
834 
835 set_rx_mode_fail:
836 	hinic_rx_remove_configure(dev);
837 
838 	return err;
839 }
840 
841 static void hinic_remove_rxtx_configure(struct rte_eth_dev *dev)
842 {
843 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
844 
845 	(void)hinic_config_rx_mode(nic_dev, 0);
846 	hinic_rx_remove_configure(dev);
847 }
848 
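/* Translate firmware link status and port info into an rte_eth_link structure. */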
849 static int hinic_priv_get_dev_link_status(struct hinic_nic_dev *nic_dev,
850 					  struct rte_eth_link *link)
851 {
852 	int rc;
853 	u8 port_link_status = 0;
854 	struct nic_port_info port_link_info;
855 	struct hinic_hwdev *nic_hwdev = nic_dev->hwdev;
856 	uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
857 					ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
858 					ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
859 					ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
860 
861 	rc = hinic_get_link_status(nic_hwdev, &port_link_status);
862 	if (rc)
863 		return rc;
864 
865 	if (!port_link_status) {
866 		link->link_status = ETH_LINK_DOWN;
867 		link->link_speed = 0;
868 		link->link_duplex = ETH_LINK_HALF_DUPLEX;
869 		link->link_autoneg = ETH_LINK_FIXED;
870 		return HINIC_OK;
871 	}
872 
873 	memset(&port_link_info, 0, sizeof(port_link_info));
874 	rc = hinic_get_port_info(nic_hwdev, &port_link_info);
875 	if (rc)
876 		return rc;
877 
878 	link->link_speed = port_speed[port_link_info.speed % LINK_SPEED_MAX];
879 	link->link_duplex = port_link_info.duplex;
880 	link->link_autoneg = port_link_info.autoneg_state;
881 	link->link_status = port_link_status;
882 
883 	return HINIC_OK;
884 }
885 
886 /**
887  * DPDK callback to retrieve physical link information.
888  *
889  * @param dev
890  *   Pointer to Ethernet device structure.
891  * @param wait_to_complete
892  *   Wait for request completion.
893  *
894  * @return
895  *   0 link status changed, -1 link status not changed
896  */
897 static int hinic_link_update(struct rte_eth_dev *dev, int wait_to_complete)
898 {
899 #define CHECK_INTERVAL 10  /* 10ms */
900 #define MAX_REPEAT_TIME 100  /* 1s (100 * 10ms) in total */
901 	int rc = HINIC_OK;
902 	struct rte_eth_link link;
903 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
904 	unsigned int rep_cnt = MAX_REPEAT_TIME;
905 
906 	memset(&link, 0, sizeof(link));
907 	do {
908 		/* Get link status information from hardware */
909 		rc = hinic_priv_get_dev_link_status(nic_dev, &link);
910 		if (rc != HINIC_OK) {
911 			link.link_speed = ETH_SPEED_NUM_NONE;
912 			link.link_duplex = ETH_LINK_FULL_DUPLEX;
913 			PMD_DRV_LOG(ERR, "Get link status failed");
914 			goto out;
915 		}
916 
917 		if (!wait_to_complete || link.link_status)
918 			break;
919 
920 		rte_delay_ms(CHECK_INTERVAL);
921 	} while (rep_cnt--);
922 
923 out:
924 	rc = rte_eth_linkstatus_set(dev, &link);
925 	return rc;
926 }
927 
928 /**
929  * DPDK callback to bring the link UP.
930  *
931  * @param dev
932  *   Pointer to Ethernet device structure.
933  *
934  * @return
935  *   0 on success, negative errno value on failure.
936  */
937 static int hinic_dev_set_link_up(struct rte_eth_dev *dev)
938 {
939 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
940 	int ret;
941 
942 	ret = hinic_set_xsfp_tx_status(nic_dev->hwdev, true);
943 	if (ret) {
944 		PMD_DRV_LOG(ERR, "Enable port tx xsfp failed, dev_name: %s, port_id: %d",
945 			    nic_dev->proc_dev_name, dev->data->port_id);
946 		return ret;
947 	}
948 
949 	/* link status follows the phy port status; bringing the port up opens the PMA */
950 	ret = hinic_set_port_enable(nic_dev->hwdev, true);
951 	if (ret)
952 		PMD_DRV_LOG(ERR, "Set mac link up failed, dev_name: %s, port_id: %d",
953 			    nic_dev->proc_dev_name, dev->data->port_id);
954 
955 	return ret;
956 }
957 
958 /**
959  * DPDK callback to bring the link DOWN.
960  *
961  * @param dev
962  *   Pointer to Ethernet device structure.
963  *
964  * @return
965  *   0 on success, negative errno value on failure.
966  */
967 static int hinic_dev_set_link_down(struct rte_eth_dev *dev)
968 {
969 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
970 	int ret;
971 
972 	ret = hinic_set_xsfp_tx_status(nic_dev->hwdev, false);
973 	if (ret) {
974 		PMD_DRV_LOG(ERR, "Disable port tx xsfp failed, dev_name: %s, port_id: %d",
975 			    nic_dev->proc_dev_name, dev->data->port_id);
976 		return ret;
977 	}
978 
979 	/* link status follows the phy port status; bringing the port down closes the PMA */
980 	ret = hinic_set_port_enable(nic_dev->hwdev, false);
981 	if (ret)
982 		PMD_DRV_LOG(ERR, "Set mac link down failed, dev_name: %s, port_id: %d",
983 			    nic_dev->proc_dev_name, dev->data->port_id);
984 
985 	return ret;
986 }
987 
988 /**
989  * DPDK callback to start the device.
990  *
991  * @param dev
992  *   Pointer to Ethernet device structure.
993  *
994  * @return
995  *   0 on success, negative errno value on failure.
996  */
997 static int hinic_dev_start(struct rte_eth_dev *dev)
998 {
999 	int rc;
1000 	char *name;
1001 	struct hinic_nic_dev *nic_dev;
1002 
1003 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1004 	name = dev->data->name;
1005 
1006 	/* reset rx and tx queue */
1007 	hinic_reset_rx_queue(dev);
1008 	hinic_reset_tx_queue(dev);
1009 
1010 	/* get func rx buf size */
1011 	hinic_get_func_rx_buf_size(nic_dev);
1012 
1013 	/* init txq and rxq context */
1014 	rc = hinic_init_qp_ctxts(nic_dev->hwdev);
1015 	if (rc) {
1016 		PMD_DRV_LOG(ERR, "Initialize qp context failed, dev_name: %s",
1017 			    name);
1018 		goto init_qp_fail;
1019 	}
1020 
1021 	/* rss template */
1022 	rc = hinic_config_mq_mode(dev, TRUE);
1023 	if (rc) {
1024 		PMD_DRV_LOG(ERR, "Configure mq mode failed, dev_name: %s",
1025 			    name);
1026 		goto cfg_mq_mode_fail;
1027 	}
1028 
1029 	/* set default mtu */
1030 	rc = hinic_set_port_mtu(nic_dev->hwdev, nic_dev->mtu_size);
1031 	if (rc) {
1032 		PMD_DRV_LOG(ERR, "Set mtu_size[%d] failed, dev_name: %s",
1033 			    nic_dev->mtu_size, name);
1034 		goto set_mtu_fail;
1035 	}
1036 
1037 	/* configure rss rx_mode and other rx or tx default feature */
1038 	rc = hinic_rxtx_configure(dev);
1039 	if (rc) {
1040 		PMD_DRV_LOG(ERR, "Configure tx and rx failed, dev_name: %s",
1041 			    name);
1042 		goto cfg_rxtx_fail;
1043 	}
1044 
1045 	/* reactivate pf status so that the uP reports async events */
1046 	hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_ACTIVE_FLAG);
1047 
1048 	/* open virtual port and ready to start packet receiving */
1049 	rc = hinic_set_vport_enable(nic_dev->hwdev, true);
1050 	if (rc) {
1051 		PMD_DRV_LOG(ERR, "Enable vport failed, dev_name:%s", name);
1052 		goto en_vport_fail;
1053 	}
1054 
1055 	/* open physical port and start packet receiving */
1056 	rc = hinic_set_port_enable(nic_dev->hwdev, true);
1057 	if (rc) {
1058 		PMD_DRV_LOG(ERR, "Enable physical port failed, dev_name: %s",
1059 			    name);
1060 		goto en_port_fail;
1061 	}
1062 
1063 	/* update eth_dev link status */
1064 	if (dev->data->dev_conf.intr_conf.lsc != 0)
1065 		(void)hinic_link_update(dev, 0);
1066 
1067 	rte_bit_relaxed_set32(HINIC_DEV_START, &nic_dev->dev_status);
1068 
1069 	return 0;
1070 
1071 en_port_fail:
1072 	(void)hinic_set_vport_enable(nic_dev->hwdev, false);
1073 
1074 en_vport_fail:
1075 	hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_INIT);
1076 
1077 	/* Flush tx and rx chip resources in case the vport enable did not fully take effect */
1078 	(void)hinic_flush_qp_res(nic_dev->hwdev);
1079 	rte_delay_ms(100);
1080 
1081 	hinic_remove_rxtx_configure(dev);
1082 
1083 cfg_rxtx_fail:
1084 set_mtu_fail:
1085 cfg_mq_mode_fail:
1086 	hinic_free_qp_ctxts(nic_dev->hwdev);
1087 
1088 init_qp_fail:
1089 	hinic_free_all_rx_mbuf(dev);
1090 	hinic_free_all_tx_mbuf(dev);
1091 
1092 	return rc;
1093 }
1094 
1095 /**
1096  * DPDK callback to release the receive queue.
1097  *
1098  * @param queue
1099  *   Generic receive queue pointer.
1100  */
1101 static void hinic_rx_queue_release(void *queue)
1102 {
1103 	struct hinic_rxq *rxq = queue;
1104 	struct hinic_nic_dev *nic_dev;
1105 
1106 	if (!rxq) {
1107 		PMD_DRV_LOG(WARNING, "Rxq is null when release");
1108 		return;
1109 	}
1110 	nic_dev = rxq->nic_dev;
1111 
1112 	/* free rxq_pkt mbuf */
1113 	hinic_free_all_rx_mbufs(rxq);
1114 
1115 	/* free rxq_cqe, rxq_info */
1116 	hinic_free_rx_resources(rxq);
1117 
1118 	/* free root rq wq */
1119 	hinic_destroy_rq(nic_dev->hwdev, rxq->q_id);
1120 
1121 	nic_dev->rxqs[rxq->q_id] = NULL;
1122 
1123 	/* free rxq */
1124 	rte_free(rxq);
1125 }
1126 
1127 /**
1128  * DPDK callback to release the transmit queue.
1129  *
1130  * @param queue
1131  *   Generic transmit queue pointer.
1132  */
1133 static void hinic_tx_queue_release(void *queue)
1134 {
1135 	struct hinic_txq *txq = queue;
1136 	struct hinic_nic_dev *nic_dev;
1137 
1138 	if (!txq) {
1139 		PMD_DRV_LOG(WARNING, "Txq is null when release");
1140 		return;
1141 	}
1142 	nic_dev = txq->nic_dev;
1143 
1144 	/* free txq_pkt mbuf */
1145 	hinic_free_all_tx_mbufs(txq);
1146 
1147 	/* free txq_info */
1148 	hinic_free_tx_resources(txq);
1149 
1150 	/* free root sq wq */
1151 	hinic_destroy_sq(nic_dev->hwdev, txq->q_id);
1152 	nic_dev->txqs[txq->q_id] = NULL;
1153 
1154 	/* free txq */
1155 	rte_free(txq);
1156 }
1157 
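/* Destroy all RQ hardware work queues of the device. */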
1158 static void hinic_free_all_rq(struct hinic_nic_dev *nic_dev)
1159 {
1160 	u16 q_id;
1161 
1162 	for (q_id = 0; q_id < nic_dev->num_rq; q_id++)
1163 		hinic_destroy_rq(nic_dev->hwdev, q_id);
1164 }
1165 
1166 static void hinic_free_all_sq(struct hinic_nic_dev *nic_dev)
1167 {
1168 	u16 q_id;
1169 
1170 	for (q_id = 0; q_id < nic_dev->num_sq; q_id++)
1171 		hinic_destroy_sq(nic_dev->hwdev, q_id);
1172 }
1173 
1174 /**
1175  * DPDK callback to stop the device.
1176  *
1177  * @param dev
1178  *   Pointer to Ethernet device structure.
1179  */
1180 static int hinic_dev_stop(struct rte_eth_dev *dev)
1181 {
1182 	int rc;
1183 	char *name;
1184 	uint16_t port_id;
1185 	struct hinic_nic_dev *nic_dev;
1186 	struct rte_eth_link link;
1187 
1188 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1189 	name = dev->data->name;
1190 	port_id = dev->data->port_id;
1191 
1192 	dev->data->dev_started = 0;
1193 
1194 	if (!rte_bit_relaxed_test_and_clear32(HINIC_DEV_START,
1195 					      &nic_dev->dev_status)) {
1196 		PMD_DRV_LOG(INFO, "Device %s already stopped", name);
1197 		return 0;
1198 	}
1199 
1200 	/* just stop phy port and vport */
1201 	rc = hinic_set_port_enable(nic_dev->hwdev, false);
1202 	if (rc)
1203 		PMD_DRV_LOG(WARNING, "Disable phy port failed, error: %d, dev_name: %s, port_id: %d",
1204 			  rc, name, port_id);
1205 
1206 	rc = hinic_set_vport_enable(nic_dev->hwdev, false);
1207 	if (rc)
1208 		PMD_DRV_LOG(WARNING, "Disable vport failed, error: %d, dev_name: %s, port_id: %d",
1209 			  rc, name, port_id);
1210 
1211 	/* Clear recorded link status */
1212 	memset(&link, 0, sizeof(link));
1213 	(void)rte_eth_linkstatus_set(dev, &link);
1214 
1215 	/* flush pending io request */
1216 	rc = hinic_rx_tx_flush(nic_dev->hwdev);
1217 	if (rc)
1218 		PMD_DRV_LOG(WARNING, "Flush pending io failed, error: %d, dev_name: %s, port_id: %d",
1219 			    rc, name, port_id);
1220 
1221 	/* clean rss table and rx_mode */
1222 	hinic_remove_rxtx_configure(dev);
1223 
1224 	/* clean root context */
1225 	hinic_free_qp_ctxts(nic_dev->hwdev);
1226 
1227 	hinic_destroy_fdir_filter(dev);
1228 
1229 	/* free mbuf */
1230 	hinic_free_all_rx_mbuf(dev);
1231 	hinic_free_all_tx_mbuf(dev);
1232 
1233 	return 0;
1234 }
1235 
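/*
 * Disable the MSI-X interrupt in hardware and the rte interrupt, then
 * unregister the interrupt callback, retrying while it is still busy (-EAGAIN).
 */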
1236 static void hinic_disable_interrupt(struct rte_eth_dev *dev)
1237 {
1238 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1239 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1240 	int ret, retries = 0;
1241 
1242 	rte_bit_relaxed_clear32(HINIC_DEV_INTR_EN, &nic_dev->dev_status);
1243 
1244 	/* disable msix interrupt in hardware */
1245 	hinic_set_msix_state(nic_dev->hwdev, 0, HINIC_MSIX_DISABLE);
1246 
1247 	/* disable rte interrupt */
1248 	ret = rte_intr_disable(&pci_dev->intr_handle);
1249 	if (ret)
1250 		PMD_DRV_LOG(ERR, "Disable intr failed: %d", ret);
1251 
1252 	do {
1253 		ret =
1254 		rte_intr_callback_unregister(&pci_dev->intr_handle,
1255 					     hinic_dev_interrupt_handler, dev);
1256 		if (ret >= 0) {
1257 			break;
1258 		} else if (ret == -EAGAIN) {
1259 			rte_delay_ms(100);
1260 			retries++;
1261 		} else {
1262 			PMD_DRV_LOG(ERR, "intr callback unregister failed: %d",
1263 				    ret);
1264 			break;
1265 		}
1266 	} while (retries < HINIC_INTR_CB_UNREG_MAX_RETRIES);
1267 
1268 	if (retries == HINIC_INTR_CB_UNREG_MAX_RETRIES)
1269 		PMD_DRV_LOG(ERR, "Unregister intr callback failed after %d retries",
1270 			    retries);
1271 }
1272 
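/* Toggle the promiscuous bit of the cached rx mode under rx_mode_mutex. */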
1273 static int hinic_set_dev_promiscuous(struct hinic_nic_dev *nic_dev, bool enable)
1274 {
1275 	u32 rx_mode_ctrl;
1276 	int err;
1277 
1278 	err = hinic_mutex_lock(&nic_dev->rx_mode_mutex);
1279 	if (err)
1280 		return err;
1281 
1282 	rx_mode_ctrl = nic_dev->rx_mode_status;
1283 
1284 	if (enable)
1285 		rx_mode_ctrl |= HINIC_RX_MODE_PROMISC;
1286 	else
1287 		rx_mode_ctrl &= (~HINIC_RX_MODE_PROMISC);
1288 
1289 	err = hinic_config_rx_mode(nic_dev, rx_mode_ctrl);
1290 
1291 	(void)hinic_mutex_unlock(&nic_dev->rx_mode_mutex);
1292 
1293 	return err;
1294 }
1295 
1296 /**
1297  * DPDK callback to get device statistics.
1298  *
1299  * @param dev
1300  *   Pointer to Ethernet device structure.
1301  * @param stats
1302  *   Stats structure output buffer.
1303  *
1304  * @return
1305  *   0 on success and stats is filled,
1306  *   negative error value otherwise.
1307  */
1308 static int
1309 hinic_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1310 {
1311 	int i, err, q_num;
1312 	u64 rx_discards_pmd = 0;
1313 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1314 	struct hinic_vport_stats vport_stats;
1315 	struct hinic_rxq	*rxq = NULL;
1316 	struct hinic_rxq_stats rxq_stats;
1317 	struct hinic_txq	*txq = NULL;
1318 	struct hinic_txq_stats txq_stats;
1319 
1320 	err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats);
1321 	if (err) {
1322 		PMD_DRV_LOG(ERR, "Get vport stats from fw failed, nic_dev: %s",
1323 			nic_dev->proc_dev_name);
1324 		return err;
1325 	}
1326 
1327 	dev->data->rx_mbuf_alloc_failed = 0;
1328 
1329 	/* rx queue stats */
1330 	q_num = (nic_dev->num_rq < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
1331 			nic_dev->num_rq : RTE_ETHDEV_QUEUE_STAT_CNTRS;
1332 	for (i = 0; i < q_num; i++) {
1333 		rxq = nic_dev->rxqs[i];
1334 		hinic_rxq_get_stats(rxq, &rxq_stats);
1335 		stats->q_ipackets[i] = rxq_stats.packets;
1336 		stats->q_ibytes[i] = rxq_stats.bytes;
1337 		stats->q_errors[i] = rxq_stats.rx_discards;
1338 
1339 		stats->ierrors += rxq_stats.errors;
1340 		rx_discards_pmd += rxq_stats.rx_discards;
1341 		dev->data->rx_mbuf_alloc_failed += rxq_stats.rx_nombuf;
1342 	}
1343 
1344 	/* tx queue stats */
1345 	q_num = (nic_dev->num_sq < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
1346 		nic_dev->num_sq : RTE_ETHDEV_QUEUE_STAT_CNTRS;
1347 	for (i = 0; i < q_num; i++) {
1348 		txq = nic_dev->txqs[i];
1349 		hinic_txq_get_stats(txq, &txq_stats);
1350 		stats->q_opackets[i] = txq_stats.packets;
1351 		stats->q_obytes[i] = txq_stats.bytes;
1352 		stats->oerrors += (txq_stats.tx_busy + txq_stats.off_errs);
1353 	}
1354 
1355 	/* vport stats */
1356 	stats->oerrors += vport_stats.tx_discard_vport;
1357 
1358 	stats->imissed = vport_stats.rx_discard_vport + rx_discards_pmd;
1359 
1360 	stats->ipackets = (vport_stats.rx_unicast_pkts_vport +
1361 			vport_stats.rx_multicast_pkts_vport +
1362 			vport_stats.rx_broadcast_pkts_vport -
1363 			rx_discards_pmd);
1364 
1365 	stats->opackets = (vport_stats.tx_unicast_pkts_vport +
1366 			vport_stats.tx_multicast_pkts_vport +
1367 			vport_stats.tx_broadcast_pkts_vport);
1368 
1369 	stats->ibytes = (vport_stats.rx_unicast_bytes_vport +
1370 			vport_stats.rx_multicast_bytes_vport +
1371 			vport_stats.rx_broadcast_bytes_vport);
1372 
1373 	stats->obytes = (vport_stats.tx_unicast_bytes_vport +
1374 			vport_stats.tx_multicast_bytes_vport +
1375 			vport_stats.tx_broadcast_bytes_vport);
1376 	return 0;
1377 }
1378 
1379 /**
1380  * DPDK callback to clear device statistics.
1381  *
1382  * @param dev
1383  *   Pointer to Ethernet device structure.
1384  */
1385 static int hinic_dev_stats_reset(struct rte_eth_dev *dev)
1386 {
1387 	int qid;
1388 	struct hinic_rxq	*rxq = NULL;
1389 	struct hinic_txq	*txq = NULL;
1390 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1391 	int ret;
1392 
1393 	ret = hinic_clear_vport_stats(nic_dev->hwdev);
1394 	if (ret != 0)
1395 		return ret;
1396 
1397 	for (qid = 0; qid < nic_dev->num_rq; qid++) {
1398 		rxq = nic_dev->rxqs[qid];
1399 		hinic_rxq_stats_reset(rxq);
1400 	}
1401 
1402 	for (qid = 0; qid < nic_dev->num_sq; qid++) {
1403 		txq = nic_dev->txqs[qid];
1404 		hinic_txq_stats_reset(txq);
1405 	}
1406 
1407 	return 0;
1408 }
1409 
1410 /**
1411  * DPDK callback to clear device extended statistics.
1412  *
1413  * @param dev
1414  *   Pointer to Ethernet device structure.
1415  */
1416 static int hinic_dev_xstats_reset(struct rte_eth_dev *dev)
1417 {
1418 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1419 	int ret;
1420 
1421 	ret = hinic_dev_stats_reset(dev);
1422 	if (ret != 0)
1423 		return ret;
1424 
1425 	if (hinic_func_type(nic_dev->hwdev) != TYPE_VF) {
1426 		ret = hinic_clear_phy_port_stats(nic_dev->hwdev);
1427 		if (ret != 0)
1428 			return ret;
1429 	}
1430 
1431 	return 0;
1432 }
1433 
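/*
 * Build a locally administered MAC address: OUI 00:09:C0 with the
 * local-admin bit forced and the last 3 bytes randomized.
 */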
1434 static void hinic_gen_random_mac_addr(struct rte_ether_addr *mac_addr)
1435 {
1436 	uint64_t random_value;
1437 
1438 	/* Set Organizationally Unique Identifier (OUI) prefix */
1439 	mac_addr->addr_bytes[0] = 0x00;
1440 	mac_addr->addr_bytes[1] = 0x09;
1441 	mac_addr->addr_bytes[2] = 0xC0;
1442 	/* Force indication of locally assigned MAC address. */
1443 	mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
1444 	/* Generate the last 3 bytes of the MAC address with a random number. */
1445 	random_value = rte_rand();
1446 	memcpy(&mac_addr->addr_bytes[3], &random_value, 3);
1447 }
1448 
1449 /**
1450  * Init mac_vlan table in NIC.
1451  *
1452  * @param dev
1453  *   Pointer to Ethernet device structure.
1454  *
1455  * @return
1456  *   0 on success,
1457  *   negative error value otherwise.
1458  */
1459 static int hinic_init_mac_addr(struct rte_eth_dev *eth_dev)
1460 {
1461 	struct hinic_nic_dev *nic_dev =
1462 				HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
1463 	uint8_t addr_bytes[RTE_ETHER_ADDR_LEN];
1464 	u16 func_id = 0;
1465 	int rc = 0;
1466 
1467 	rc = hinic_get_default_mac(nic_dev->hwdev, addr_bytes);
1468 	if (rc)
1469 		return rc;
1470 
1471 	rte_ether_addr_copy((struct rte_ether_addr *)addr_bytes,
1472 		&eth_dev->data->mac_addrs[0]);
1473 	if (rte_is_zero_ether_addr(&eth_dev->data->mac_addrs[0]))
1474 		hinic_gen_random_mac_addr(&eth_dev->data->mac_addrs[0]);
1475 
1476 	func_id = hinic_global_func_id(nic_dev->hwdev);
1477 	rc = hinic_set_mac(nic_dev->hwdev,
1478 			eth_dev->data->mac_addrs[0].addr_bytes,
1479 			0, func_id);
1480 	if (rc && rc != HINIC_PF_SET_VF_ALREADY)
1481 		return rc;
1482 
1483 	rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
1484 			&nic_dev->default_addr);
1485 
1486 	return 0;
1487 }
1488 
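/* Remove all multicast MAC addresses recorded in mc_list from the NIC. */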
1489 static void hinic_delete_mc_addr_list(struct hinic_nic_dev *nic_dev)
1490 {
1491 	u16 func_id;
1492 	u32 i;
1493 
1494 	func_id = hinic_global_func_id(nic_dev->hwdev);
1495 
1496 	for (i = 0; i < HINIC_MAX_MC_MAC_ADDRS; i++) {
1497 		if (rte_is_zero_ether_addr(&nic_dev->mc_list[i]))
1498 			break;
1499 
1500 		hinic_del_mac(nic_dev->hwdev, nic_dev->mc_list[i].addr_bytes,
1501 			      0, func_id);
1502 		memset(&nic_dev->mc_list[i], 0, sizeof(struct rte_ether_addr));
1503 	}
1504 }
1505 
1506 /**
1507  * Deinit mac_vlan table in NIC.
1508  *
1509  * @param dev
1510  *   Pointer to Ethernet device structure.
1515  */
1516 static void hinic_deinit_mac_addr(struct rte_eth_dev *eth_dev)
1517 {
1518 	struct hinic_nic_dev *nic_dev =
1519 				HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
1520 	u16 func_id = 0;
1521 	int rc;
1522 	int i;
1523 
1524 	func_id = hinic_global_func_id(nic_dev->hwdev);
1525 
1526 	for (i = 0; i < HINIC_MAX_UC_MAC_ADDRS; i++) {
1527 		if (rte_is_zero_ether_addr(&eth_dev->data->mac_addrs[i]))
1528 			continue;
1529 
1530 		rc = hinic_del_mac(nic_dev->hwdev,
1531 				   eth_dev->data->mac_addrs[i].addr_bytes,
1532 				   0, func_id);
1533 		if (rc && rc != HINIC_PF_SET_VF_ALREADY)
1534 			PMD_DRV_LOG(ERR, "Delete mac table failed, dev_name: %s",
1535 				    eth_dev->data->name);
1536 
1537 		memset(&eth_dev->data->mac_addrs[i], 0,
1538 		       sizeof(struct rte_ether_addr));
1539 	}
1540 
1541 	/* delete multicast mac addrs */
1542 	hinic_delete_mc_addr_list(nic_dev);
1543 }
1544 
1545 static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
1546 {
1547 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1548 	uint32_t frame_size;
1549 	int ret = 0;
1550 
1551 	PMD_DRV_LOG(INFO, "Set port mtu, port_id: %d, mtu: %d, max_pkt_len: %d",
1552 			dev->data->port_id, mtu, HINIC_MTU_TO_PKTLEN(mtu));
1553 
1554 	if (mtu < HINIC_MIN_MTU_SIZE || mtu > HINIC_MAX_MTU_SIZE) {
1555 		PMD_DRV_LOG(ERR, "Invalid mtu: %d, must be between %d and %d",
1556 				mtu, HINIC_MIN_MTU_SIZE, HINIC_MAX_MTU_SIZE);
1557 		return -EINVAL;
1558 	}
1559 
1560 	ret = hinic_set_port_mtu(nic_dev->hwdev, mtu);
1561 	if (ret) {
1562 		PMD_DRV_LOG(ERR, "Set port mtu failed, ret: %d", ret);
1563 		return ret;
1564 	}
1565 
1566 	/* update max frame size */
1567 	frame_size = HINIC_MTU_TO_PKTLEN(mtu);
1568 	if (frame_size > RTE_ETHER_MAX_LEN)
1569 		dev->data->dev_conf.rxmode.offloads |=
1570 			DEV_RX_OFFLOAD_JUMBO_FRAME;
1571 	else
1572 		dev->data->dev_conf.rxmode.offloads &=
1573 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
1574 
1575 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1576 	nic_dev->mtu_size = mtu;
1577 
1578 	return ret;
1579 }
1580 
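/* Record the state of a VLAN id in the software VFTA bitmap (see HINIC_VFTA_IDX/BIT). */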
1581 static void hinic_store_vlan_filter(struct hinic_nic_dev *nic_dev,
1582 					u16 vlan_id, bool on)
1583 {
1584 	u32 vid_idx, vid_bit;
1585 
1586 	vid_idx = HINIC_VFTA_IDX(vlan_id);
1587 	vid_bit = HINIC_VFTA_BIT(vlan_id);
1588 
1589 	if (on)
1590 		nic_dev->vfta[vid_idx] |= vid_bit;
1591 	else
1592 		nic_dev->vfta[vid_idx] &= ~vid_bit;
1593 }
1594 
1595 static bool hinic_find_vlan_filter(struct hinic_nic_dev *nic_dev,
1596 				uint16_t vlan_id)
1597 {
1598 	u32 vid_idx, vid_bit;
1599 
1600 	vid_idx = HINIC_VFTA_IDX(vlan_id);
1601 	vid_bit = HINIC_VFTA_BIT(vlan_id);
1602 
1603 	return (nic_dev->vfta[vid_idx] & vid_bit) ? TRUE : FALSE;
1604 }
1605 
1606 /**
1607  * DPDK callback to set vlan filter.
1608  *
1609  * @param dev
1610  *   Pointer to Ethernet device structure.
1611  * @param vlan_id
1612  *   VLAN id used to filter VLAN packets.
1613  * @param enable
1614  *   Enable or disable filtering for this VLAN id.
1615  */
1616 static int hinic_vlan_filter_set(struct rte_eth_dev *dev,
1617 				uint16_t vlan_id, int enable)
1618 {
1619 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1620 	int err = 0;
1621 	u16 func_id;
1622 
1623 	if (vlan_id > RTE_ETHER_MAX_VLAN_ID)
1624 		return -EINVAL;
1625 
1626 	func_id = hinic_global_func_id(nic_dev->hwdev);
1627 
1628 	if (enable) {
1629 		/* If vlanid is already set, just return */
1630 		if (hinic_find_vlan_filter(nic_dev, vlan_id)) {
1631 			PMD_DRV_LOG(INFO, "Vlan %u has been added, device: %s",
1632 				  vlan_id, nic_dev->proc_dev_name);
1633 			return 0;
1634 		}
1635 
1636 		err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id,
1637 					    func_id, TRUE);
1638 	} else {
1639 		/* If vlanid can't be found, just return */
1640 		if (!hinic_find_vlan_filter(nic_dev, vlan_id)) {
1641 			PMD_DRV_LOG(INFO, "Vlan %u is not in the vlan filter list, device: %s",
1642 				  vlan_id, nic_dev->proc_dev_name);
1643 			return 0;
1644 		}
1645 
1646 		err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id,
1647 					    func_id, FALSE);
1648 	}
1649 
1650 	if (err) {
1651 		PMD_DRV_LOG(ERR, "%s vlan failed, func_id: %d, vlan_id: %d, err: %d",
1652 		      enable ? "Add" : "Remove", func_id, vlan_id, err);
1653 		return err;
1654 	}
1655 
1656 	hinic_store_vlan_filter(nic_dev, vlan_id, enable);
1657 
1658 	PMD_DRV_LOG(INFO, "%s vlan %u succeed, device: %s",
1659 		  enable ? "Add" : "Remove", vlan_id, nic_dev->proc_dev_name);
1660 	return 0;
1661 }
1662 
1663 /**
1664  * DPDK callback to enable or disable vlan offload.
1665  *
1666  * @param dev
1667  *   Pointer to Ethernet device structure.
1668  * @param mask
1669  *   Bitmask of ETH_VLAN_*_MASK flags selecting which VLAN offloads to update.
1670  */
1671 static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1672 {
1673 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1674 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1675 	bool on;
1676 	int err;
1677 
1678 	/* Enable or disable VLAN filter */
1679 	if (mask & ETH_VLAN_FILTER_MASK) {
1680 		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ?
1681 			TRUE : FALSE;
1682 		err = hinic_config_vlan_filter(nic_dev->hwdev, on);
1683 		if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
1684 			PMD_DRV_LOG(WARNING,
1685 				"Current matching version does not support vlan filter configuration, device: %s, port_id: %d",
1686 				  nic_dev->proc_dev_name, dev->data->port_id);
1687 		} else if (err) {
1688 			PMD_DRV_LOG(ERR, "Failed to %s vlan filter, device: %s, port_id: %d, err: %d",
1689 				  on ? "enable" : "disable",
1690 				  nic_dev->proc_dev_name,
1691 				  dev->data->port_id, err);
1692 			return err;
1693 		}
1694 
1695 		PMD_DRV_LOG(INFO, "%s vlan filter succeed, device: %s, port_id: %d",
1696 			  on ? "Enable" : "Disable",
1697 			  nic_dev->proc_dev_name, dev->data->port_id);
1698 	}
1699 
1700 	/* Enable or disable VLAN stripping */
1701 	if (mask & ETH_VLAN_STRIP_MASK) {
1702 		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) ?
1703 			TRUE : FALSE;
1704 		err = hinic_set_rx_vlan_offload(nic_dev->hwdev, on);
1705 		if (err) {
1706 			PMD_DRV_LOG(ERR, "Failed to %s vlan strip, device: %s, port_id: %d, err: %d",
1707 				  on ? "enable" : "disable",
1708 				  nic_dev->proc_dev_name,
1709 				  dev->data->port_id, err);
1710 			return err;
1711 		}
1712 
1713 		PMD_DRV_LOG(INFO, "%s vlan strip succeed, device: %s, port_id: %d",
1714 			  on ? "Enable" : "Disable",
1715 			  nic_dev->proc_dev_name, dev->data->port_id);
1716 	}
1717 
1718 	return 0;
1719 }
1720 
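/* Remove every VLAN id tracked in the software VFTA from the hardware filter. */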
1721 static void hinic_remove_all_vlanid(struct rte_eth_dev *eth_dev)
1722 {
1723 	struct hinic_nic_dev *nic_dev =
1724 		HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
1725 	u16 func_id;
1726 	int i;
1727 
1728 	func_id = hinic_global_func_id(nic_dev->hwdev);
1729 	for (i = 0; i <= RTE_ETHER_MAX_VLAN_ID; i++) {
1730 		/* If the vlan id is not in the filter list, continue */
1731 		if (!hinic_find_vlan_filter(nic_dev, i))
1732 			continue;
1733 
1734 		(void)hinic_add_remove_vlan(nic_dev->hwdev, i, func_id, FALSE);
1735 		hinic_store_vlan_filter(nic_dev, i, false);
1736 	}
1737 }
1738 
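/* Toggle the all-multicast bit of the cached rx mode under rx_mode_mutex. */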
1739 static int hinic_set_dev_allmulticast(struct hinic_nic_dev *nic_dev,
1740 				bool enable)
1741 {
1742 	u32 rx_mode_ctrl;
1743 	int err;
1744 
1745 	err = hinic_mutex_lock(&nic_dev->rx_mode_mutex);
1746 	if (err)
1747 		return err;
1748 
1749 	rx_mode_ctrl = nic_dev->rx_mode_status;
1750 
1751 	if (enable)
1752 		rx_mode_ctrl |= HINIC_RX_MODE_MC_ALL;
1753 	else
1754 		rx_mode_ctrl &= (~HINIC_RX_MODE_MC_ALL);
1755 
1756 	err = hinic_config_rx_mode(nic_dev, rx_mode_ctrl);
1757 
1758 	(void)hinic_mutex_unlock(&nic_dev->rx_mode_mutex);
1759 
1760 	return err;
1761 }
1762 
1763 /**
1764  * DPDK callback to enable allmulticast mode.
1765  *
1766  * @param dev
1767  *   Pointer to Ethernet device structure.
1768  *
1769  * @return
1770  *   0 on success,
1771  *   negative error value otherwise.
1772  */
1773 static int hinic_dev_allmulticast_enable(struct rte_eth_dev *dev)
1774 {
1775 	int ret = HINIC_OK;
1776 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1777 
1778 	ret = hinic_set_dev_allmulticast(nic_dev, true);
1779 	if (ret) {
1780 		PMD_DRV_LOG(ERR, "Enable allmulticast failed, error: %d", ret);
1781 		return ret;
1782 	}
1783 
1784 	PMD_DRV_LOG(INFO, "Enable allmulticast succeeded, nic_dev: %s, port_id: %d",
1785 		nic_dev->proc_dev_name, dev->data->port_id);
1786 	return 0;
1787 }
1788 
1789 /**
1790  * DPDK callback to disable allmulticast mode.
1791  *
1792  * @param dev
1793  *   Pointer to Ethernet device structure.
1794  *
1795  * @return
1796  *   0 on success,
1797  *   negative error value otherwise.
1798  */
1799 static int hinic_dev_allmulticast_disable(struct rte_eth_dev *dev)
1800 {
1801 	int ret = HINIC_OK;
1802 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1803 
1804 	ret = hinic_set_dev_allmulticast(nic_dev, false);
1805 	if (ret) {
1806 		PMD_DRV_LOG(ERR, "Disable allmulticast failed, error: %d", ret);
1807 		return ret;
1808 	}
1809 
1810 	PMD_DRV_LOG(INFO, "Disable allmulticast succeeded, nic_dev: %s, port_id: %d",
1811 		nic_dev->proc_dev_name, dev->data->port_id);
1812 	return 0;
1813 }
1814 
1815 /**
1816  * DPDK callback to enable promiscuous mode.
1817  *
1818  * @param dev
1819  *   Pointer to Ethernet device structure.
1820  *
1821  * @return
1822  *   0 on success,
1823  *   negative error value otherwise.
1824  */
1825 static int hinic_dev_promiscuous_enable(struct rte_eth_dev *dev)
1826 {
1827 	int rc = HINIC_OK;
1828 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1829 
1830 	PMD_DRV_LOG(INFO, "Enable promiscuous, nic_dev: %s, port_id: %d, promisc: %d",
1831 		    nic_dev->proc_dev_name, dev->data->port_id,
1832 		    dev->data->promiscuous);
1833 
1834 	rc = hinic_set_dev_promiscuous(nic_dev, true);
1835 	if (rc)
1836 		PMD_DRV_LOG(ERR, "Enable promiscuous failed");
1837 
1838 	return rc;
1839 }
1840 
1841 /**
1842  * DPDK callback to disable promiscuous mode.
1843  *
1844  * @param dev
1845  *   Pointer to Ethernet device structure.
1846  *
1847  * @return
1848  *   0 on success,
1849  *   negative error value otherwise.
1850  */
1851 static int hinic_dev_promiscuous_disable(struct rte_eth_dev *dev)
1852 {
1853 	int rc = HINIC_OK;
1854 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1855 
1856 	PMD_DRV_LOG(INFO, "Disable promiscuous, nic_dev: %s, port_id: %d, promisc: %d",
1857 		    nic_dev->proc_dev_name, dev->data->port_id,
1858 		    dev->data->promiscuous);
1859 
1860 	rc = hinic_set_dev_promiscuous(nic_dev, false);
1861 	if (rc)
1862 		PMD_DRV_LOG(ERR, "Disable promiscuous failed");
1863 
1864 	return rc;
1865 }
1866 
1867 static int hinic_flow_ctrl_get(struct rte_eth_dev *dev,
1868 			struct rte_eth_fc_conf *fc_conf)
1869 {
1870 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1871 	struct nic_pause_config nic_pause;
1872 	int err;
1873 
1874 	memset(&nic_pause, 0, sizeof(nic_pause));
1875 
1876 	err = hinic_get_pause_info(nic_dev->hwdev, &nic_pause);
1877 	if (err)
1878 		return err;
1879 
1880 	if (nic_dev->pause_set || !nic_pause.auto_neg) {
1881 		nic_pause.rx_pause = nic_dev->nic_pause.rx_pause;
1882 		nic_pause.tx_pause = nic_dev->nic_pause.tx_pause;
1883 	}
1884 
1885 	fc_conf->autoneg = nic_pause.auto_neg;
1886 
1887 	if (nic_pause.tx_pause && nic_pause.rx_pause)
1888 		fc_conf->mode = RTE_FC_FULL;
1889 	else if (nic_pause.tx_pause)
1890 		fc_conf->mode = RTE_FC_TX_PAUSE;
1891 	else if (nic_pause.rx_pause)
1892 		fc_conf->mode = RTE_FC_RX_PAUSE;
1893 	else
1894 		fc_conf->mode = RTE_FC_NONE;
1895 
1896 	return 0;
1897 }
1898 
1899 static int hinic_flow_ctrl_set(struct rte_eth_dev *dev,
1900 			struct rte_eth_fc_conf *fc_conf)
1901 {
1902 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1903 	struct nic_pause_config nic_pause;
1904 	int err;
1905 
1906 	nic_pause.auto_neg = fc_conf->autoneg;
1907 
1908 	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
1909 		(fc_conf->mode & RTE_FC_TX_PAUSE))
1910 		nic_pause.tx_pause = true;
1911 	else
1912 		nic_pause.tx_pause = false;
1913 
1914 	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
1915 		(fc_conf->mode & RTE_FC_RX_PAUSE))
1916 		nic_pause.rx_pause = true;
1917 	else
1918 		nic_pause.rx_pause = false;
1919 
1920 	err = hinic_set_pause_config(nic_dev->hwdev, nic_pause);
1921 	if (err)
1922 		return err;
1923 
1924 	nic_dev->pause_set = true;
1925 	nic_dev->nic_pause.auto_neg = nic_pause.auto_neg;
1926 	nic_dev->nic_pause.rx_pause = nic_pause.rx_pause;
1927 	nic_dev->nic_pause.tx_pause = nic_pause.tx_pause;
1928 
1929 	PMD_DRV_LOG(INFO, "Set pause options, tx: %s, rx: %s, auto: %s",
1930 		nic_pause.tx_pause ? "on" : "off",
1931 		nic_pause.rx_pause ? "on" : "off",
1932 		nic_pause.auto_neg ? "on" : "off");
1933 
1934 	return 0;
1935 }
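/*
 * Usage sketch (illustration only): flow control is normally configured
 * through rte_eth_dev_flow_ctrl_set(), e.g. full rx/tx pause without
 * autonegotiation:
 *
 *     struct rte_eth_fc_conf fc = { 0 };
 *     fc.mode = RTE_FC_FULL;
 *     fc.autoneg = 0;
 *     rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */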
1936 
1937 /**
1938  * DPDK callback to update the RSS hash key and RSS hash type.
1939  *
1940  * @param dev
1941  *   Pointer to Ethernet device structure.
1942  * @param rss_conf
1943  *   RSS configuration data.
1944  *
1945  * @return
1946  *   0 on success, negative error value otherwise.
1947  */
1948 static int hinic_rss_hash_update(struct rte_eth_dev *dev,
1949 			  struct rte_eth_rss_conf *rss_conf)
1950 {
1951 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1952 	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
1953 	u8 hashkey[HINIC_RSS_KEY_SIZE] = {0};
1954 	u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
1955 	u64 rss_hf = rss_conf->rss_hf;
1956 	struct nic_rss_type rss_type = {0};
1957 	int err = 0;
1958 
1959 	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
1960 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
1961 		return HINIC_OK;
1962 	}
1963 
1964 	if (rss_conf->rss_key_len > HINIC_RSS_KEY_SIZE) {
1965 		PMD_DRV_LOG(ERR, "Invalid rss key, rss_key_len: %d",
1966 			    rss_conf->rss_key_len);
1967 		return HINIC_ERROR;
1968 	}
1969 
1970 	if (rss_conf->rss_key) {
1971 		memcpy(hashkey, rss_conf->rss_key, rss_conf->rss_key_len);
1972 		err = hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx,
1973 						 hashkey);
1974 		if (err) {
1975 			PMD_DRV_LOG(ERR, "Set rss template table failed");
1976 			goto disable_rss;
1977 		}
1978 	}
1979 
1980 	rss_type.ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
1981 	rss_type.tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
1982 	rss_type.ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
1983 	rss_type.ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
1984 	rss_type.tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
1985 	rss_type.tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
1986 	rss_type.udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
1987 	rss_type.udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
1988 
1989 	err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type);
1990 	if (err) {
1991 		PMD_DRV_LOG(ERR, "Set rss type table failed");
1992 		goto disable_rss;
1993 	}
1994 
1995 	return 0;
1996 
1997 disable_rss:
1998 	memset(prio_tc, 0, sizeof(prio_tc));
1999 	(void)hinic_rss_cfg(nic_dev->hwdev, 0, tmpl_idx, 0, prio_tc);
2000 	return err;
2001 }
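/*
 * Usage sketch (illustration only): the hash key and hash types are usually
 * updated through the generic API, with a key buffer supplied by the
 * application:
 *
 *     uint8_t key[HINIC_RSS_KEY_SIZE];        // filled by the application
 *     struct rte_eth_rss_conf rss = {
 *             .rss_key = key,
 *             .rss_key_len = sizeof(key),
 *             .rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP,
 *     };
 *     rte_eth_dev_rss_hash_update(port_id, &rss);
 */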
2002 
2003 /**
2004  * DPDK callback to get the RSS hash configuration.
2005  *
2006  * @param dev
2007  *   Pointer to Ethernet device structure.
2008  * @param rss_conf
2009  *   RSS configuration data.
2010  *
2011  * @return
2012  *   0 on success, negative error value otherwise.
2013  */
2014 static int hinic_rss_conf_get(struct rte_eth_dev *dev,
2015 		       struct rte_eth_rss_conf *rss_conf)
2016 {
2017 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2018 	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
2019 	u8 hashkey[HINIC_RSS_KEY_SIZE] = {0};
2020 	struct nic_rss_type rss_type = {0};
2021 	int err;
2022 
2023 	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
2024 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
2025 		return HINIC_ERROR;
2026 	}
2027 
2028 	err = hinic_rss_get_template_tbl(nic_dev->hwdev, tmpl_idx, hashkey);
2029 	if (err)
2030 		return err;
2031 
2032 	if (rss_conf->rss_key &&
2033 	    rss_conf->rss_key_len >= HINIC_RSS_KEY_SIZE) {
2034 		memcpy(rss_conf->rss_key, hashkey, sizeof(hashkey));
2035 		rss_conf->rss_key_len = sizeof(hashkey);
2036 	}
2037 
2038 	err = hinic_get_rss_type(nic_dev->hwdev, tmpl_idx, &rss_type);
2039 	if (err)
2040 		return err;
2041 
2042 	rss_conf->rss_hf = 0;
2043 	rss_conf->rss_hf |=  rss_type.ipv4 ?
2044 		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4) : 0;
2045 	rss_conf->rss_hf |=  rss_type.tcp_ipv4 ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
2046 	rss_conf->rss_hf |=  rss_type.ipv6 ?
2047 		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6) : 0;
2048 	rss_conf->rss_hf |=  rss_type.ipv6_ext ? ETH_RSS_IPV6_EX : 0;
2049 	rss_conf->rss_hf |=  rss_type.tcp_ipv6 ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
2050 	rss_conf->rss_hf |=  rss_type.tcp_ipv6_ext ? ETH_RSS_IPV6_TCP_EX : 0;
2051 	rss_conf->rss_hf |=  rss_type.udp_ipv4 ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
2052 	rss_conf->rss_hf |=  rss_type.udp_ipv6 ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
2053 
2054 	return HINIC_OK;
2055 }
2056 
2057 /**
2058  * DPDK callback to update the RSS redirection table.
2059  *
2060  * @param dev
2061  *   Pointer to Ethernet device structure.
2062  * @param reta_conf
2063  *   Pointer to RSS reta configuration data.
2064  * @param reta_size
2065  *   Size of the RETA table.
2066  *
2067  * @return
2068  *   0 on success, negative error value otherwise.
2069  */
2070 static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev,
2071 			      struct rte_eth_rss_reta_entry64 *reta_conf,
2072 			      uint16_t reta_size)
2073 {
2074 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2075 	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
2076 	u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
2077 	u32 indirtbl[NIC_RSS_INDIR_SIZE] = {0};
2078 	int err = 0;
2079 	u16 i = 0;
2080 	u16 idx, shift;
2081 
2082 	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG))
2083 		return HINIC_OK;
2084 
2085 	if (reta_size != NIC_RSS_INDIR_SIZE) {
2086 		PMD_DRV_LOG(ERR, "Invalid reta size, reta_size: %d", reta_size);
2087 		return HINIC_ERROR;
2088 	}
2089 
2090 	err = hinic_rss_get_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl);
2091 	if (err)
2092 		return err;
2093 
2094 	/* update rss indir_tbl */
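	/*
	 * Each rte_eth_rss_reta_entry64 element covers RTE_RETA_GROUP_SIZE
	 * (64) RETA entries: 'idx' selects the 64-entry group and 'shift'
	 * the entry inside it; only entries whose mask bit is set are
	 * written into the indirection table.
	 */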
2095 	for (i = 0; i < reta_size; i++) {
2096 		idx = i / RTE_RETA_GROUP_SIZE;
2097 		shift = i % RTE_RETA_GROUP_SIZE;
2098 
2099 		if (reta_conf[idx].reta[shift] >= nic_dev->num_rq) {
2100 			PMD_DRV_LOG(ERR, "Invalid reta entry, indirtbl[%d]: %d "
2101 				"exceeds the maximum rxq num: %d", i,
2102 				reta_conf[idx].reta[shift], nic_dev->num_rq);
2103 			return -EINVAL;
2104 		}
2105 
2106 		if (reta_conf[idx].mask & (1ULL << shift))
2107 			indirtbl[i] = reta_conf[idx].reta[shift];
2108 	}
2109 
2110 	err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl);
2111 	if (err)
2112 		goto disable_rss;
2113 
2114 	nic_dev->rss_indir_flag = true;
2115 
2116 	return 0;
2117 
2118 disable_rss:
2119 	memset(prio_tc, 0, sizeof(prio_tc));
2120 	(void)hinic_rss_cfg(nic_dev->hwdev, 0, tmpl_idx, 0, prio_tc);
2121 
2122 	return HINIC_ERROR;
2123 }
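/*
 * Usage sketch (illustration only, assuming the fixed-size table checked
 * above has 256 entries): spread the RETA round-robin over 'nb_rxq' queues:
 *
 *     struct rte_eth_rss_reta_entry64 reta[256 / RTE_RETA_GROUP_SIZE] = { 0 };
 *     uint16_t i;
 *
 *     for (i = 0; i < 256; i++) {
 *             reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *                     1ULL << (i % RTE_RETA_GROUP_SIZE);
 *             reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                     i % nb_rxq;
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, reta, 256);
 */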
2124 
2125 /**
2126  * DPDK callback to get the RSS indirection table.
2127  *
2128  * @param dev
2129  *   Pointer to Ethernet device structure.
2130  * @param reta_conf
2131  *   Pointer to RSS reta configuration data.
2132  * @param reta_size
2133  *   Size of the RETA table.
2134  *
2135  * @return
2136  *   0 on success, negative error value otherwise.
2137  */
2138 static int hinic_rss_indirtbl_query(struct rte_eth_dev *dev,
2139 			     struct rte_eth_rss_reta_entry64 *reta_conf,
2140 			     uint16_t reta_size)
2141 {
2142 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2143 	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
2144 	int err = 0;
2145 	u32 indirtbl[NIC_RSS_INDIR_SIZE] = {0};
2146 	u16 idx, shift;
2147 	u16 i = 0;
2148 
2149 	if (reta_size != NIC_RSS_INDIR_SIZE) {
2150 		PMD_DRV_LOG(ERR, "Invalid reta size, reta_size: %d", reta_size);
2151 		return HINIC_ERROR;
2152 	}
2153 
2154 	err = hinic_rss_get_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl);
2155 	if (err) {
2156 		PMD_DRV_LOG(ERR, "Get rss indirect table failed, error: %d",
2157 			    err);
2158 		return err;
2159 	}
2160 
2161 	for (i = 0; i < reta_size; i++) {
2162 		idx = i / RTE_RETA_GROUP_SIZE;
2163 		shift = i % RTE_RETA_GROUP_SIZE;
2164 		if (reta_conf[idx].mask & (1ULL << shift))
2165 			reta_conf[idx].reta[shift] = (uint16_t)indirtbl[i];
2166 	}
2167 
2168 	return HINIC_OK;
2169 }
2170 
2171 /**
2172  * DPDK callback to get extended device statistics.
2173  *
2174  * @param dev
2175  *   Pointer to Ethernet device.
2176  * @param xstats
2177  *   Pointer to rte extended stats table.
2178  * @param n
2179  *   The size of the stats table.
2180  *
2181  * @return
2182  *   Number of extended stats on success and stats is filled,
2183  *   negative error value otherwise.
2184  */
2185 static int hinic_dev_xstats_get(struct rte_eth_dev *dev,
2186 			 struct rte_eth_xstat *xstats,
2187 			 unsigned int n)
2188 {
2189 	u16 qid = 0;
2190 	u32 i;
2191 	int err, count;
2192 	struct hinic_nic_dev *nic_dev;
2193 	struct hinic_phy_port_stats port_stats;
2194 	struct hinic_vport_stats vport_stats;
2195 	struct hinic_rxq	*rxq = NULL;
2196 	struct hinic_rxq_stats rxq_stats;
2197 	struct hinic_txq	*txq = NULL;
2198 	struct hinic_txq_stats txq_stats;
2199 
2200 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2201 	count = hinic_xstats_calc_num(nic_dev);
2202 	if ((int)n < count)
2203 		return count;
2204 
2205 	count = 0;
2206 
2207 	/* Get stats from hinic_rxq_stats */
2208 	for (qid = 0; qid < nic_dev->num_rq; qid++) {
2209 		rxq = nic_dev->rxqs[qid];
2210 		hinic_rxq_get_stats(rxq, &rxq_stats);
2211 
2212 		for (i = 0; i < HINIC_RXQ_XSTATS_NUM; i++) {
2213 			xstats[count].value =
2214 				*(uint64_t *)(((char *)&rxq_stats) +
2215 				hinic_rxq_stats_strings[i].offset);
2216 			xstats[count].id = count;
2217 			count++;
2218 		}
2219 	}
2220 
2221 	/* Get stats from hinic_txq_stats */
2222 	for (qid = 0; qid < nic_dev->num_sq; qid++) {
2223 		txq = nic_dev->txqs[qid];
2224 		hinic_txq_get_stats(txq, &txq_stats);
2225 
2226 		for (i = 0; i < HINIC_TXQ_XSTATS_NUM; i++) {
2227 			xstats[count].value =
2228 				*(uint64_t *)(((char *)&txq_stats) +
2229 				hinic_txq_stats_strings[i].offset);
2230 			xstats[count].id = count;
2231 			count++;
2232 		}
2233 	}
2234 
2235 	/* Get stats from hinic_vport_stats */
2236 	err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats);
2237 	if (err)
2238 		return err;
2239 
2240 	for (i = 0; i < HINIC_VPORT_XSTATS_NUM; i++) {
2241 		xstats[count].value =
2242 			*(uint64_t *)(((char *)&vport_stats) +
2243 			hinic_vport_stats_strings[i].offset);
2244 		xstats[count].id = count;
2245 		count++;
2246 	}
2247 
2248 	if (HINIC_IS_VF(nic_dev->hwdev))
2249 		return count;
2250 
2251 	/* Get stats from hinic_phy_port_stats */
2252 	err = hinic_get_phy_port_stats(nic_dev->hwdev, &port_stats);
2253 	if (err)
2254 		return err;
2255 
2256 	for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) {
2257 		xstats[count].value = *(uint64_t *)(((char *)&port_stats) +
2258 				hinic_phyport_stats_strings[i].offset);
2259 		xstats[count].id = count;
2260 		count++;
2261 	}
2262 
2263 	return count;
2264 }
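/*
 * Note: the value ordering above (per-rxq stats, per-txq stats, vport stats,
 * then PF-only physical port stats) must stay in sync with
 * hinic_dev_xstats_get_names() below, since xstats ids are assigned
 * positionally.
 */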
2265 
2266 static void hinic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2267 				struct rte_eth_rxq_info *qinfo)
2268 {
2269 	struct hinic_rxq  *rxq = dev->data->rx_queues[queue_id];
2270 
2271 	qinfo->mp = rxq->mb_pool;
2272 	qinfo->nb_desc = rxq->q_depth;
2273 }
2274 
2275 static void hinic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2276 				struct rte_eth_txq_info *qinfo)
2277 {
2278 	struct hinic_txq  *txq = dev->data->tx_queues[queue_id];
2279 
2280 	qinfo->nb_desc = txq->q_depth;
2281 }
2282 
2283 /**
2284  * DPDK callback to retrieve names of extended device statistics.
2285  *
2286  * @param dev
2287  *   Pointer to Ethernet device structure.
2288  * @param xstats_names
2289  *   Buffer to insert names into.
2290  *
2291  * @return
2292  *   Number of xstats names.
2293  */
2294 static int hinic_dev_xstats_get_names(struct rte_eth_dev *dev,
2295 			       struct rte_eth_xstat_name *xstats_names,
2296 			       __rte_unused unsigned int limit)
2297 {
2298 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2299 	int count = 0;
2300 	u16 i = 0, q_num;
2301 
2302 	if (xstats_names == NULL)
2303 		return hinic_xstats_calc_num(nic_dev);
2304 
2305 	/* get pmd rxq stats */
2306 	for (q_num = 0; q_num < nic_dev->num_rq; q_num++) {
2307 		for (i = 0; i < HINIC_RXQ_XSTATS_NUM; i++) {
2308 			snprintf(xstats_names[count].name,
2309 				 sizeof(xstats_names[count].name),
2310 				 "rxq%d_%s_pmd",
2311 				 q_num, hinic_rxq_stats_strings[i].name);
2312 			count++;
2313 		}
2314 	}
2315 
2316 	/* get pmd txq stats */
2317 	for (q_num = 0; q_num < nic_dev->num_sq; q_num++) {
2318 		for (i = 0; i < HINIC_TXQ_XSTATS_NUM; i++) {
2319 			snprintf(xstats_names[count].name,
2320 				 sizeof(xstats_names[count].name),
2321 				 "txq%d_%s_pmd",
2322 				 q_num, hinic_txq_stats_strings[i].name);
2323 			count++;
2324 		}
2325 	}
2326 
2327 	/* get vport stats */
2328 	for (i = 0; i < HINIC_VPORT_XSTATS_NUM; i++) {
2329 		snprintf(xstats_names[count].name,
2330 			 sizeof(xstats_names[count].name),
2331 			 "%s", hinic_vport_stats_strings[i].name);
2332 		count++;
2333 	}
2334 
2335 	if (HINIC_IS_VF(nic_dev->hwdev))
2336 		return count;
2337 
2338 	/* get phy port stats */
2339 	for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) {
2340 		snprintf(xstats_names[count].name,
2341 			 sizeof(xstats_names[count].name),
2342 			 "%s", hinic_phyport_stats_strings[i].name);
2343 		count++;
2344 	}
2345 
2346 	return count;
2347 }
2348 
2349 /**
2350  * DPDK callback to set the default MAC address.
2351  *
2352  * @param dev
2353  *   Pointer to Ethernet device structure.
2354  * @param addr
2355  *   Pointer to the new MAC address.
2356  * @return
2357  *   0 on success, negative error value otherwise.
2358  */
2359 static int hinic_set_mac_addr(struct rte_eth_dev *dev,
2360 			      struct rte_ether_addr *addr)
2361 {
2362 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2363 	u16 func_id;
2364 	int err;
2365 
2366 	func_id = hinic_global_func_id(nic_dev->hwdev);
2367 	err = hinic_update_mac(nic_dev->hwdev, nic_dev->default_addr.addr_bytes,
2368 			       addr->addr_bytes, 0, func_id);
2369 	if (err)
2370 		return err;
2371 
2372 	rte_ether_addr_copy(addr, &nic_dev->default_addr);
2373 
2374 	PMD_DRV_LOG(INFO, "Set new mac address %02x:%02x:%02x:%02x:%02x:%02x",
2375 		    addr->addr_bytes[0], addr->addr_bytes[1],
2376 		    addr->addr_bytes[2], addr->addr_bytes[3],
2377 		    addr->addr_bytes[4], addr->addr_bytes[5]);
2378 
2379 	return 0;
2380 }
2381 
2382 /**
2383  * DPDK callback to remove a MAC address.
2384  *
2385  * @param dev
2386  *   Pointer to Ethernet device structure.
2387  * @param index
2388  *   MAC address index; must be less than HINIC_MAX_UC_MAC_ADDRS (128).
2389  */
2390 static void hinic_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
2391 {
2392 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2393 	u16 func_id;
2394 	int ret;
2395 
2396 	if (index >= HINIC_MAX_UC_MAC_ADDRS) {
2397 		PMD_DRV_LOG(INFO, "Remove MAC failed: index (%u) is out of range",
2398 			    index);
2399 		return;
2400 	}
2401 
2402 	func_id = hinic_global_func_id(nic_dev->hwdev);
2403 	ret = hinic_del_mac(nic_dev->hwdev,
2404 			    dev->data->mac_addrs[index].addr_bytes, 0, func_id);
2405 	if (ret)
2406 		return;
2407 
2408 	memset(&dev->data->mac_addrs[index], 0, sizeof(struct rte_ether_addr));
2409 }
2410 
2411 /**
2412  * DPDK callback to add a MAC address.
2413  *
2414  * @param dev
2415  *   Pointer to Ethernet device structure.
2416  * @param mac_addr
2417  *   Pointer to MAC address
2418  * @param index
2419  *   MAC address index; must be less than HINIC_MAX_UC_MAC_ADDRS (128).
2420  * @param vmdq
2421  *   VMDq pool index(not used).
2422  *
2423  * @return
2424  *   0 on success, negative error value otherwise.
2425  */
2426 static int hinic_mac_addr_add(struct rte_eth_dev *dev,
2427 			      struct rte_ether_addr *mac_addr, uint32_t index,
2428 			      __rte_unused uint32_t vmdq)
2429 {
2430 	struct hinic_nic_dev  *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2431 	unsigned int i;
2432 	u16 func_id;
2433 	int ret;
2434 
2435 	if (index >= HINIC_MAX_UC_MAC_ADDRS) {
2436 		PMD_DRV_LOG(INFO, "Add MAC failed: index (%u) is out of range", index);
2437 		return -EINVAL;
2438 	}
2439 
2440 	/* First, make sure this address isn't already configured. */
2441 	for (i = 0; (i != HINIC_MAX_UC_MAC_ADDRS); ++i) {
2442 		/* Skip this index, it's going to be reconfigured. */
2443 		if (i == index)
2444 			continue;
2445 
2446 		if (memcmp(&dev->data->mac_addrs[i],
2447 			mac_addr, sizeof(*mac_addr)))
2448 			continue;
2449 
2450 		PMD_DRV_LOG(INFO, "MAC address already configured");
2451 		return -EADDRINUSE;
2452 	}
2453 
2454 	func_id = hinic_global_func_id(nic_dev->hwdev);
2455 	ret = hinic_set_mac(nic_dev->hwdev, mac_addr->addr_bytes, 0, func_id);
2456 	if (ret)
2457 		return ret;
2458 
2459 	dev->data->mac_addrs[index] = *mac_addr;
2460 	return 0;
2461 }
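/*
 * Usage sketch (illustration only): secondary unicast addresses are added
 * through the generic API; the VMDq pool argument is ignored by this driver:
 *
 *     struct rte_ether_addr extra_mac;   // filled by the application
 *     rte_eth_dev_mac_addr_add(port_id, &extra_mac, 0);
 */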
2462 
2463 /**
2464  * DPDK callback to set the list of multicast MAC addresses.
2465  *
2466  * @param dev
2467  *   Pointer to Ethernet device structure.
2468  * @param mc_addr_set
2469  *   Pointer to the array of multicast MAC addresses.
2470  * @param nb_mc_addr
2471  *   Number of multicast MAC addresses to set.
2472  * @return
2473  *   0 on success, negative error value otherwise.
2474  */
2475 static int hinic_set_mc_addr_list(struct rte_eth_dev *dev,
2476 				  struct rte_ether_addr *mc_addr_set,
2477 				  uint32_t nb_mc_addr)
2478 {
2479 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2480 	u16 func_id;
2481 	int ret;
2482 	u32 i;
2483 
2484 	func_id = hinic_global_func_id(nic_dev->hwdev);
2485 
2486 	/* Delete the old multicast addresses first */
2487 	hinic_delete_mc_addr_list(nic_dev);
2488 
2489 	if (nb_mc_addr > HINIC_MAX_MC_MAC_ADDRS)
2490 		goto allmulti;
2491 
2492 	for (i = 0; i < nb_mc_addr; i++) {
2493 		ret = hinic_set_mac(nic_dev->hwdev, mc_addr_set[i].addr_bytes,
2494 				    0, func_id);
2495 		/* If adding a multicast address fails, fall back to allmulticast */
2496 		if (ret) {
2497 			hinic_delete_mc_addr_list(nic_dev);
2498 			goto allmulti;
2499 		}
2500 
2501 		rte_ether_addr_copy(&mc_addr_set[i], &nic_dev->mc_list[i]);
2502 	}
2503 
2504 	return 0;
2505 
2506 allmulti:
2507 	hinic_dev_allmulticast_enable(dev);
2508 
2509 	return 0;
2510 }
2511 
2512 /**
2513  * DPDK callback to manage filter control operations
2514  *
2515  * @param dev
2516  *   Pointer to Ethernet device structure.
2517  * @param filter_type
2518  *   Filter type; only the generic filter type is supported.
2519  * @param filter_op
2520  *   Filter operation to perform.
2521  * @param arg
2522  *   Pointer to operation-specific structure.
2523  *
2524  * @return
2525  *   0 on success, negative error value otherwise.
2526  */
2527 static int hinic_dev_filter_ctrl(struct rte_eth_dev *dev,
2528 		     enum rte_filter_type filter_type,
2529 		     enum rte_filter_op filter_op,
2530 		     void *arg)
2531 {
2532 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2533 	int func_id = hinic_global_func_id(nic_dev->hwdev);
2534 
2535 	switch (filter_type) {
2536 	case RTE_ETH_FILTER_GENERIC:
2537 		if (filter_op != RTE_ETH_FILTER_GET)
2538 			return -EINVAL;
2539 		*(const void **)arg = &hinic_flow_ops;
2540 		break;
2541 	default:
2542 		PMD_DRV_LOG(INFO, "Filter type (%d) not supported",
2543 			filter_type);
2544 		return -EINVAL;
2545 	}
2546 
2547 	PMD_DRV_LOG(INFO, "Set filter_ctrl succeeded, func_id: 0x%x, filter_type: 0x%x, "
2548 			"filter_op: 0x%x.", func_id, filter_type, filter_op);
2549 	return 0;
2550 }
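/*
 * Usage sketch (illustration only): applications normally reach
 * hinic_flow_ops through the rte_flow API rather than calling filter_ctrl
 * directly, e.g.
 *
 *     struct rte_flow_error error;
 *     struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                             actions, &error);
 *
 * where 'attr', 'pattern' and 'actions' are built by the application.
 */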
2551 
2552 static int hinic_set_default_pause_feature(struct hinic_nic_dev *nic_dev)
2553 {
2554 	struct nic_pause_config pause_config = {0};
2555 	int err;
2556 
2557 	pause_config.auto_neg = 0;
2558 	pause_config.rx_pause = HINIC_DEFAUT_PAUSE_CONFIG;
2559 	pause_config.tx_pause = HINIC_DEFAUT_PAUSE_CONFIG;
2560 
2561 	err = hinic_set_pause_config(nic_dev->hwdev, pause_config);
2562 	if (err)
2563 		return err;
2564 
2565 	nic_dev->pause_set = true;
2566 	nic_dev->nic_pause.auto_neg = pause_config.auto_neg;
2567 	nic_dev->nic_pause.rx_pause = pause_config.rx_pause;
2568 	nic_dev->nic_pause.tx_pause = pause_config.tx_pause;
2569 
2570 	return 0;
2571 }
2572 
2573 static int hinic_set_default_dcb_feature(struct hinic_nic_dev *nic_dev)
2574 {
2575 	u8 up_tc[HINIC_DCB_UP_MAX] = {0};
2576 	u8 up_pgid[HINIC_DCB_UP_MAX] = {0};
2577 	u8 up_bw[HINIC_DCB_UP_MAX] = {0};
2578 	u8 pg_bw[HINIC_DCB_UP_MAX] = {0};
2579 	u8 up_strict[HINIC_DCB_UP_MAX] = {0};
2580 	int i = 0;
2581 
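	/*
	 * Default ETS restore: all bandwidth is assigned to priority group 0
	 * and every user priority is allowed 100% of its group's bandwidth.
	 */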
2582 	pg_bw[0] = 100;
2583 	for (i = 0; i < HINIC_DCB_UP_MAX; i++)
2584 		up_bw[i] = 100;
2585 
2586 	return hinic_dcb_set_ets(nic_dev->hwdev, up_tc, pg_bw,
2587 					up_pgid, up_bw, up_strict);
2588 }
2589 
2590 static int hinic_pf_get_default_cos(struct hinic_hwdev *hwdev, u8 *cos_id)
2591 {
2592 	u8 default_cos = 0;
2593 	u8 valid_cos_bitmap;
2594 	u8 i;
2595 
2596 	valid_cos_bitmap = hwdev->cfg_mgmt->svc_cap.valid_cos_bitmap;
2597 	if (!valid_cos_bitmap) {
2598 		PMD_DRV_LOG(ERR, "PF has no valid cos to use");
2599 		return -EFAULT;
2600 	}
2601 
2602 	for (i = 0; i < NR_MAX_COS; i++) {
2603 		if (valid_cos_bitmap & BIT(i))
2604 			default_cos = i; /* Find max cos id as default cos */
2605 	}
2606 
2607 	*cos_id = default_cos;
2608 
2609 	return 0;
2610 }
2611 
2612 static int hinic_init_default_cos(struct hinic_nic_dev *nic_dev)
2613 {
2614 	u8 cos_id = 0;
2615 	int err;
2616 
2617 	if (!HINIC_IS_VF(nic_dev->hwdev)) {
2618 		err = hinic_pf_get_default_cos(nic_dev->hwdev, &cos_id);
2619 		if (err) {
2620 			PMD_DRV_LOG(ERR, "Get PF default cos failed, err: %d",
2621 				    err);
2622 			return HINIC_ERROR;
2623 		}
2624 	} else {
2625 		err = hinic_vf_get_default_cos(nic_dev->hwdev, &cos_id);
2626 		if (err) {
2627 			PMD_DRV_LOG(ERR, "Get VF default cos failed, err: %d",
2628 				    err);
2629 			return HINIC_ERROR;
2630 		}
2631 	}
2632 
2633 	nic_dev->default_cos = cos_id;
2634 
2635 	PMD_DRV_LOG(INFO, "Default cos %d", nic_dev->default_cos);
2636 
2637 	return 0;
2638 }
2639 
2640 static int hinic_set_default_hw_feature(struct hinic_nic_dev *nic_dev)
2641 {
2642 	int err;
2643 
2644 	err = hinic_init_default_cos(nic_dev);
2645 	if (err)
2646 		return err;
2647 
2648 	if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
2649 		return 0;
2650 
2651 	/* Restore DCB configure to default status */
2652 	err = hinic_set_default_dcb_feature(nic_dev);
2653 	if (err)
2654 		return err;
2655 
2656 	/* Enable global pause; PFC is disabled for all user priorities (UPs). */
2657 	err = hinic_set_default_pause_feature(nic_dev);
2658 	if (err)
2659 		return err;
2660 
2661 	err = hinic_reset_port_link_cfg(nic_dev->hwdev);
2662 	if (err)
2663 		return err;
2664 
2665 	err = hinic_set_link_status_follow(nic_dev->hwdev,
2666 					   HINIC_LINK_FOLLOW_PORT);
2667 	if (err == HINIC_MGMT_CMD_UNSUPPORTED)
2668 		PMD_DRV_LOG(WARNING, "Setting link status to follow the phy port status is not supported");
2669 	else if (err)
2670 		return err;
2671 
2672 	return hinic_set_anti_attack(nic_dev->hwdev, true);
2673 }
2674 
2675 static int32_t hinic_card_workmode_check(struct hinic_nic_dev *nic_dev)
2676 {
2677 	struct hinic_board_info info = { 0 };
2678 	int rc;
2679 
2680 	if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
2681 		return 0;
2682 
2683 	rc = hinic_get_board_info(nic_dev->hwdev, &info);
2684 	if (rc)
2685 		return rc;
2686 
2687 	return (info.service_mode == HINIC_SERVICE_MODE_NIC ? HINIC_OK :
2688 						HINIC_ERROR);
2689 }
2690 
2691 static int hinic_copy_mempool_init(struct hinic_nic_dev *nic_dev)
2692 {
2693 	nic_dev->cpy_mpool = rte_mempool_lookup(nic_dev->proc_dev_name);
2694 	if (nic_dev->cpy_mpool == NULL) {
2695 		nic_dev->cpy_mpool =
2696 		rte_pktmbuf_pool_create(nic_dev->proc_dev_name,
2697 					HINIC_COPY_MEMPOOL_DEPTH,
2698 					0, 0,
2699 					HINIC_COPY_MBUF_SIZE,
2700 					rte_socket_id());
2701 		if (!nic_dev->cpy_mpool) {
2702 			PMD_DRV_LOG(ERR, "Create copy mempool failed, errno: %d, dev_name: %s",
2703 				    rte_errno, nic_dev->proc_dev_name);
2704 			return -ENOMEM;
2705 		}
2706 	}
2707 
2708 	return 0;
2709 }
2710 
2711 static void hinic_copy_mempool_uninit(struct hinic_nic_dev *nic_dev)
2712 {
2713 	if (nic_dev->cpy_mpool != NULL)
2714 		rte_mempool_free(nic_dev->cpy_mpool);
2715 }
2716 
2717 static int hinic_init_sw_rxtxqs(struct hinic_nic_dev *nic_dev)
2718 {
2719 	u32 txq_size;
2720 	u32 rxq_size;
2721 
2722 	/* allocate software txq array */
2723 	txq_size = nic_dev->nic_cap.max_sqs * sizeof(*nic_dev->txqs);
2724 	nic_dev->txqs = kzalloc_aligned(txq_size, GFP_KERNEL);
2725 	if (!nic_dev->txqs) {
2726 		PMD_DRV_LOG(ERR, "Allocate txqs failed");
2727 		return -ENOMEM;
2728 	}
2729 
2730 	/* allocate software rxq array */
2731 	rxq_size = nic_dev->nic_cap.max_rqs * sizeof(*nic_dev->rxqs);
2732 	nic_dev->rxqs = kzalloc_aligned(rxq_size, GFP_KERNEL);
2733 	if (!nic_dev->rxqs) {
2734 		/* free txqs */
2735 		kfree(nic_dev->txqs);
2736 		nic_dev->txqs = NULL;
2737 
2738 		PMD_DRV_LOG(ERR, "Allocate rxqs failed");
2739 		return -ENOMEM;
2740 	}
2741 
2742 	return HINIC_OK;
2743 }
2744 
2745 static void hinic_deinit_sw_rxtxqs(struct hinic_nic_dev *nic_dev)
2746 {
2747 	kfree(nic_dev->txqs);
2748 	nic_dev->txqs = NULL;
2749 
2750 	kfree(nic_dev->rxqs);
2751 	nic_dev->rxqs = NULL;
2752 }
2753 
2754 static int hinic_nic_dev_create(struct rte_eth_dev *eth_dev)
2755 {
2756 	struct hinic_nic_dev *nic_dev =
2757 				HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
2758 	int rc;
2759 
2760 	nic_dev->hwdev = rte_zmalloc("hinic_hwdev", sizeof(*nic_dev->hwdev),
2761 				     RTE_CACHE_LINE_SIZE);
2762 	if (!nic_dev->hwdev) {
2763 		PMD_DRV_LOG(ERR, "Allocate hinic hwdev memory failed, dev_name: %s",
2764 			    eth_dev->data->name);
2765 		return -ENOMEM;
2766 	}
2767 	nic_dev->hwdev->pcidev_hdl = RTE_ETH_DEV_TO_PCI(eth_dev);
2768 
2769 	/* init osdep */
2770 	rc = hinic_osdep_init(nic_dev->hwdev);
2771 	if (rc) {
2772 		PMD_DRV_LOG(ERR, "Initialize os_dep failed, dev_name: %s",
2773 			    eth_dev->data->name);
2774 		goto init_osdep_fail;
2775 	}
2776 
2777 	/* init_hwif */
2778 	rc = hinic_hwif_res_init(nic_dev->hwdev);
2779 	if (rc) {
2780 		PMD_DRV_LOG(ERR, "Initialize hwif failed, dev_name: %s",
2781 			    eth_dev->data->name);
2782 		goto init_hwif_fail;
2783 	}
2784 
2785 	/* init_cfg_mgmt */
2786 	rc = init_cfg_mgmt(nic_dev->hwdev);
2787 	if (rc) {
2788 		PMD_DRV_LOG(ERR, "Initialize cfg_mgmt failed, dev_name: %s",
2789 			    eth_dev->data->name);
2790 		goto init_cfgmgnt_fail;
2791 	}
2792 
2793 	/* init_aeqs */
2794 	rc = hinic_comm_aeqs_init(nic_dev->hwdev);
2795 	if (rc) {
2796 		PMD_DRV_LOG(ERR, "Initialize aeqs failed, dev_name: %s",
2797 			    eth_dev->data->name);
2798 		goto init_aeqs_fail;
2799 	}
2800 
2801 	/* init_pf_to_mgnt */
2802 	rc = hinic_comm_pf_to_mgmt_init(nic_dev->hwdev);
2803 	if (rc) {
2804 		PMD_DRV_LOG(ERR, "Initialize pf_to_mgmt failed, dev_name: %s",
2805 			    eth_dev->data->name);
2806 		goto init_pf_to_mgmt_fail;
2807 	}
2808 
2809 	/* init mailbox */
2810 	rc = hinic_comm_func_to_func_init(nic_dev->hwdev);
2811 	if (rc) {
2812 		PMD_DRV_LOG(ERR, "Initialize func_to_func failed, dev_name: %s",
2813 			    eth_dev->data->name);
2814 		goto init_func_to_func_fail;
2815 	}
2816 
2817 	rc = hinic_card_workmode_check(nic_dev);
2818 	if (rc) {
2819 		PMD_DRV_LOG(ERR, "Check card workmode failed, dev_name: %s",
2820 			    eth_dev->data->name);
2821 		goto workmode_check_fail;
2822 	}
2823 
2824 	/* Do an l2nic reset to bring the chip to a clean state */
2825 	rc = hinic_l2nic_reset(nic_dev->hwdev);
2826 	if (rc) {
2827 		PMD_DRV_LOG(ERR, "Do l2nic reset failed, dev_name: %s",
2828 			    eth_dev->data->name);
2829 		goto l2nic_reset_fail;
2830 	}
2831 
2832 	/* init dma and aeq msix attribute table */
2833 	(void)hinic_init_attr_table(nic_dev->hwdev);
2834 
2835 	/* init_cmdqs */
2836 	rc = hinic_comm_cmdqs_init(nic_dev->hwdev);
2837 	if (rc) {
2838 		PMD_DRV_LOG(ERR, "Initialize cmdq failed, dev_name: %s",
2839 			    eth_dev->data->name);
2840 		goto init_cmdq_fail;
2841 	}
2842 
2843 	/* set hardware state active */
2844 	rc = hinic_activate_hwdev_state(nic_dev->hwdev);
2845 	if (rc) {
2846 		PMD_DRV_LOG(ERR, "Initialize resources state failed, dev_name: %s",
2847 			    eth_dev->data->name);
2848 		goto init_resources_state_fail;
2849 	}
2850 
2851 	/* init_capability */
2852 	rc = hinic_init_capability(nic_dev->hwdev);
2853 	if (rc) {
2854 		PMD_DRV_LOG(ERR, "Initialize capability failed, dev_name: %s",
2855 			    eth_dev->data->name);
2856 		goto init_cap_fail;
2857 	}
2858 
2859 	/* get nic capability */
2860 	if (!hinic_support_nic(nic_dev->hwdev, &nic_dev->nic_cap)) {
2861 		PMD_DRV_LOG(ERR, "Hardware doesn't support NIC service, dev_name: %s",
2862 			    eth_dev->data->name);
2863 		rc = -EINVAL;
2864 		goto nic_check_fail;
2865 	}
2866 
2867 	/* init root cla and function table */
2868 	rc = hinic_init_nicio(nic_dev->hwdev);
2869 	if (rc) {
2870 		PMD_DRV_LOG(ERR, "Initialize nic_io failed, dev_name: %s",
2871 			    eth_dev->data->name);
2872 		goto init_nicio_fail;
2873 	}
2874 
2875 	/* init_software_txrxq */
2876 	rc = hinic_init_sw_rxtxqs(nic_dev);
2877 	if (rc) {
2878 		PMD_DRV_LOG(ERR, "Initialize sw_rxtxqs failed, dev_name: %s",
2879 			    eth_dev->data->name);
2880 		goto init_sw_rxtxqs_fail;
2881 	}
2882 
2883 	rc = hinic_copy_mempool_init(nic_dev);
2884 	if (rc) {
2885 		PMD_DRV_LOG(ERR, "Create copy mempool failed, dev_name: %s",
2886 			 eth_dev->data->name);
2887 		goto init_mpool_fail;
2888 	}
2889 
2890 	/* set hardware feature to default status */
2891 	rc = hinic_set_default_hw_feature(nic_dev);
2892 	if (rc) {
2893 		PMD_DRV_LOG(ERR, "Initialize hardware default features failed, dev_name: %s",
2894 			    eth_dev->data->name);
2895 		goto set_default_hw_feature_fail;
2896 	}
2897 
2898 	return 0;
2899 
2900 set_default_hw_feature_fail:
2901 	hinic_copy_mempool_uninit(nic_dev);
2902 
2903 init_mpool_fail:
2904 	hinic_deinit_sw_rxtxqs(nic_dev);
2905 
2906 init_sw_rxtxqs_fail:
2907 	hinic_deinit_nicio(nic_dev->hwdev);
2908 
2909 nic_check_fail:
2910 init_nicio_fail:
2911 init_cap_fail:
2912 	hinic_deactivate_hwdev_state(nic_dev->hwdev);
2913 
2914 init_resources_state_fail:
2915 	hinic_comm_cmdqs_free(nic_dev->hwdev);
2916 
2917 init_cmdq_fail:
2918 l2nic_reset_fail:
2919 workmode_check_fail:
2920 	hinic_comm_func_to_func_free(nic_dev->hwdev);
2921 
2922 init_func_to_func_fail:
2923 	hinic_comm_pf_to_mgmt_free(nic_dev->hwdev);
2924 
2925 init_pf_to_mgmt_fail:
2926 	hinic_comm_aeqs_free(nic_dev->hwdev);
2927 
2928 init_aeqs_fail:
2929 	free_cfg_mgmt(nic_dev->hwdev);
2930 
2931 init_cfgmgnt_fail:
2932 	hinic_hwif_res_free(nic_dev->hwdev);
2933 
2934 init_hwif_fail:
2935 	hinic_osdep_deinit(nic_dev->hwdev);
2936 
2937 init_osdep_fail:
2938 	rte_free(nic_dev->hwdev);
2939 	nic_dev->hwdev = NULL;
2940 
2941 	return rc;
2942 }
2943 
2944 static void hinic_nic_dev_destroy(struct rte_eth_dev *eth_dev)
2945 {
2946 	struct hinic_nic_dev *nic_dev =
2947 			HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
2948 
2949 	(void)hinic_set_link_status_follow(nic_dev->hwdev,
2950 					   HINIC_LINK_FOLLOW_DEFAULT);
2951 	hinic_copy_mempool_uninit(nic_dev);
2952 	hinic_deinit_sw_rxtxqs(nic_dev);
2953 	hinic_deinit_nicio(nic_dev->hwdev);
2954 	hinic_deactivate_hwdev_state(nic_dev->hwdev);
2955 	hinic_comm_cmdqs_free(nic_dev->hwdev);
2956 	hinic_comm_func_to_func_free(nic_dev->hwdev);
2957 	hinic_comm_pf_to_mgmt_free(nic_dev->hwdev);
2958 	hinic_comm_aeqs_free(nic_dev->hwdev);
2959 	free_cfg_mgmt(nic_dev->hwdev);
2960 	hinic_hwif_res_free(nic_dev->hwdev);
2961 	hinic_osdep_deinit(nic_dev->hwdev);
2962 	rte_free(nic_dev->hwdev);
2963 	nic_dev->hwdev = NULL;
2964 }
2965 
2966 /**
2967  * DPDK callback to close the device.
2968  *
2969  * @param dev
2970  *   Pointer to Ethernet device structure.
2971  */
2972 static int hinic_dev_close(struct rte_eth_dev *dev)
2973 {
2974 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2975 	int ret;
2976 
2977 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2978 		return 0;
2979 
2980 	if (rte_bit_relaxed_test_and_set32(HINIC_DEV_CLOSE,
2981 					   &nic_dev->dev_status)) {
2982 		PMD_DRV_LOG(WARNING, "Device %s already closed",
2983 			    dev->data->name);
2984 		return 0;
2985 	}
2986 
2987 	/* stop device first */
2988 	ret = hinic_dev_stop(dev);
2989 
2990 	/* rx_cqe, rx_info */
2991 	hinic_free_all_rx_resources(dev);
2992 
2993 	/* tx_info */
2994 	hinic_free_all_tx_resources(dev);
2995 
2996 	/* free wq, pi_dma_addr */
2997 	hinic_free_all_rq(nic_dev);
2998 
2999 	/* free wq, db_addr */
3000 	hinic_free_all_sq(nic_dev);
3001 
3002 	/* deinit mac vlan tbl */
3003 	hinic_deinit_mac_addr(dev);
3004 	hinic_remove_all_vlanid(dev);
3005 
3006 	/* disable hardware and uio interrupt */
3007 	hinic_disable_interrupt(dev);
3008 
3009 	/* deinit nic hardware device */
3010 	hinic_nic_dev_destroy(dev);
3011 
3012 	return ret;
3013 }
3014 
3015 static const struct eth_dev_ops hinic_pmd_ops = {
3016 	.dev_configure                 = hinic_dev_configure,
3017 	.dev_infos_get                 = hinic_dev_infos_get,
3018 	.fw_version_get                = hinic_fw_version_get,
3019 	.rx_queue_setup                = hinic_rx_queue_setup,
3020 	.tx_queue_setup                = hinic_tx_queue_setup,
3021 	.dev_start                     = hinic_dev_start,
3022 	.dev_set_link_up               = hinic_dev_set_link_up,
3023 	.dev_set_link_down             = hinic_dev_set_link_down,
3024 	.link_update                   = hinic_link_update,
3025 	.rx_queue_release              = hinic_rx_queue_release,
3026 	.tx_queue_release              = hinic_tx_queue_release,
3027 	.dev_stop                      = hinic_dev_stop,
3028 	.dev_close                     = hinic_dev_close,
3029 	.mtu_set                       = hinic_dev_set_mtu,
3030 	.vlan_filter_set               = hinic_vlan_filter_set,
3031 	.vlan_offload_set              = hinic_vlan_offload_set,
3032 	.allmulticast_enable           = hinic_dev_allmulticast_enable,
3033 	.allmulticast_disable          = hinic_dev_allmulticast_disable,
3034 	.promiscuous_enable            = hinic_dev_promiscuous_enable,
3035 	.promiscuous_disable           = hinic_dev_promiscuous_disable,
3036 	.flow_ctrl_get                 = hinic_flow_ctrl_get,
3037 	.flow_ctrl_set                 = hinic_flow_ctrl_set,
3038 	.rss_hash_update               = hinic_rss_hash_update,
3039 	.rss_hash_conf_get             = hinic_rss_conf_get,
3040 	.reta_update                   = hinic_rss_indirtbl_update,
3041 	.reta_query                    = hinic_rss_indirtbl_query,
3042 	.stats_get                     = hinic_dev_stats_get,
3043 	.stats_reset                   = hinic_dev_stats_reset,
3044 	.xstats_get                    = hinic_dev_xstats_get,
3045 	.xstats_reset                  = hinic_dev_xstats_reset,
3046 	.xstats_get_names              = hinic_dev_xstats_get_names,
3047 	.rxq_info_get                  = hinic_rxq_info_get,
3048 	.txq_info_get                  = hinic_txq_info_get,
3049 	.mac_addr_set                  = hinic_set_mac_addr,
3050 	.mac_addr_remove               = hinic_mac_addr_remove,
3051 	.mac_addr_add                  = hinic_mac_addr_add,
3052 	.set_mc_addr_list              = hinic_set_mc_addr_list,
3053 	.filter_ctrl                   = hinic_dev_filter_ctrl,
3054 };
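/*
 * VF ops table: relative to hinic_pmd_ops above it omits the callbacks that
 * are not exposed for VFs (dev_set_link_up/down, promiscuous mode and flow
 * control); the remaining callbacks are shared with the PF path.
 */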
3055 
3056 static const struct eth_dev_ops hinic_pmd_vf_ops = {
3057 	.dev_configure                 = hinic_dev_configure,
3058 	.dev_infos_get                 = hinic_dev_infos_get,
3059 	.fw_version_get                = hinic_fw_version_get,
3060 	.rx_queue_setup                = hinic_rx_queue_setup,
3061 	.tx_queue_setup                = hinic_tx_queue_setup,
3062 	.dev_start                     = hinic_dev_start,
3063 	.link_update                   = hinic_link_update,
3064 	.rx_queue_release              = hinic_rx_queue_release,
3065 	.tx_queue_release              = hinic_tx_queue_release,
3066 	.dev_stop                      = hinic_dev_stop,
3067 	.dev_close                     = hinic_dev_close,
3068 	.mtu_set                       = hinic_dev_set_mtu,
3069 	.vlan_filter_set               = hinic_vlan_filter_set,
3070 	.vlan_offload_set              = hinic_vlan_offload_set,
3071 	.allmulticast_enable           = hinic_dev_allmulticast_enable,
3072 	.allmulticast_disable          = hinic_dev_allmulticast_disable,
3073 	.rss_hash_update               = hinic_rss_hash_update,
3074 	.rss_hash_conf_get             = hinic_rss_conf_get,
3075 	.reta_update                   = hinic_rss_indirtbl_update,
3076 	.reta_query                    = hinic_rss_indirtbl_query,
3077 	.stats_get                     = hinic_dev_stats_get,
3078 	.stats_reset                   = hinic_dev_stats_reset,
3079 	.xstats_get                    = hinic_dev_xstats_get,
3080 	.xstats_reset                  = hinic_dev_xstats_reset,
3081 	.xstats_get_names              = hinic_dev_xstats_get_names,
3082 	.rxq_info_get                  = hinic_rxq_info_get,
3083 	.txq_info_get                  = hinic_txq_info_get,
3084 	.mac_addr_set                  = hinic_set_mac_addr,
3085 	.mac_addr_remove               = hinic_mac_addr_remove,
3086 	.mac_addr_add                  = hinic_mac_addr_add,
3087 	.set_mc_addr_list              = hinic_set_mc_addr_list,
3088 	.filter_ctrl                   = hinic_dev_filter_ctrl,
3089 };
3090 
3091 static int hinic_func_init(struct rte_eth_dev *eth_dev)
3092 {
3093 	struct rte_pci_device *pci_dev;
3094 	struct rte_ether_addr *eth_addr;
3095 	struct hinic_nic_dev *nic_dev;
3096 	struct hinic_filter_info *filter_info;
3097 	struct hinic_tcam_info *tcam_info;
3098 	u32 mac_size;
3099 	int rc;
3100 
3101 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3102 
3103 	/* EAL is SECONDARY and eth_dev is already created */
3104 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3105 		PMD_DRV_LOG(INFO, "Initializing %s in secondary process",
3106 			    eth_dev->data->name);
3107 
3108 		return 0;
3109 	}
3110 
3111 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
3112 
3113 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
3114 	memset(nic_dev, 0, sizeof(*nic_dev));
3115 
3116 	snprintf(nic_dev->proc_dev_name,
3117 		 sizeof(nic_dev->proc_dev_name),
3118 		 "hinic-%.4x:%.2x:%.2x.%x",
3119 		 pci_dev->addr.domain, pci_dev->addr.bus,
3120 		 pci_dev->addr.devid, pci_dev->addr.function);
3121 
3122 	/* alloc mac_addrs */
3123 	mac_size = HINIC_MAX_UC_MAC_ADDRS * sizeof(struct rte_ether_addr);
3124 	eth_addr = rte_zmalloc("hinic_mac", mac_size, 0);
3125 	if (!eth_addr) {
3126 		PMD_DRV_LOG(ERR, "Allocate Ethernet address memory failed, dev_name: %s",
3127 			    eth_dev->data->name);
3128 		rc = -ENOMEM;
3129 		goto eth_addr_fail;
3130 	}
3131 	eth_dev->data->mac_addrs = eth_addr;
3132 
3133 	mac_size = HINIC_MAX_MC_MAC_ADDRS * sizeof(struct rte_ether_addr);
3134 	nic_dev->mc_list = rte_zmalloc("hinic_mc", mac_size, 0);
3135 	if (!nic_dev->mc_list) {
3136 		PMD_DRV_LOG(ERR, "Allocate multicast address list memory failed, dev_name: %s",
3137 			    eth_dev->data->name);
3138 		rc = -ENOMEM;
3139 		goto mc_addr_fail;
3140 	}
3141 
3142 	/* create hardware nic_device */
3143 	rc = hinic_nic_dev_create(eth_dev);
3144 	if (rc) {
3145 		PMD_DRV_LOG(ERR, "Create nic device failed, dev_name: %s",
3146 			    eth_dev->data->name);
3147 		goto create_nic_dev_fail;
3148 	}
3149 
3150 	if (HINIC_IS_VF(nic_dev->hwdev))
3151 		eth_dev->dev_ops = &hinic_pmd_vf_ops;
3152 	else
3153 		eth_dev->dev_ops = &hinic_pmd_ops;
3154 
3155 	rc = hinic_init_mac_addr(eth_dev);
3156 	if (rc) {
3157 		PMD_DRV_LOG(ERR, "Initialize mac table failed, dev_name: %s",
3158 			    eth_dev->data->name);
3159 		goto init_mac_fail;
3160 	}
3161 
3162 	/* register callback func to eal lib */
3163 	rc = rte_intr_callback_register(&pci_dev->intr_handle,
3164 					hinic_dev_interrupt_handler,
3165 					(void *)eth_dev);
3166 	if (rc) {
3167 		PMD_DRV_LOG(ERR, "Register rte interrupt callback failed, dev_name: %s",
3168 			    eth_dev->data->name);
3169 		goto reg_intr_cb_fail;
3170 	}
3171 
3172 	/* enable uio/vfio intr/eventfd mapping */
3173 	rc = rte_intr_enable(&pci_dev->intr_handle);
3174 	if (rc) {
3175 		PMD_DRV_LOG(ERR, "Enable rte interrupt failed, dev_name: %s",
3176 			    eth_dev->data->name);
3177 		goto enable_intr_fail;
3178 	}
3179 	rte_bit_relaxed_set32(HINIC_DEV_INTR_EN, &nic_dev->dev_status);
3180 
3181 	hinic_mutex_init(&nic_dev->rx_mode_mutex, NULL);
3182 
3183 	/* initialize filter info */
3184 	filter_info = &nic_dev->filter;
3185 	tcam_info = &nic_dev->tcam;
3186 	memset(filter_info, 0, sizeof(struct hinic_filter_info));
3187 	memset(tcam_info, 0, sizeof(struct hinic_tcam_info));
3188 	/* initialize 5tuple filter list */
3189 	TAILQ_INIT(&filter_info->fivetuple_list);
3190 	TAILQ_INIT(&tcam_info->tcam_list);
3191 	TAILQ_INIT(&nic_dev->filter_ntuple_list);
3192 	TAILQ_INIT(&nic_dev->filter_ethertype_list);
3193 	TAILQ_INIT(&nic_dev->filter_fdir_rule_list);
3194 	TAILQ_INIT(&nic_dev->hinic_flow_list);
3195 
3196 	rte_bit_relaxed_set32(HINIC_DEV_INIT, &nic_dev->dev_status);
3197 	PMD_DRV_LOG(INFO, "Initialized %s in primary process successfully",
3198 		    eth_dev->data->name);
3199 
3200 	return 0;
3201 
3202 enable_intr_fail:
3203 	(void)rte_intr_callback_unregister(&pci_dev->intr_handle,
3204 					   hinic_dev_interrupt_handler,
3205 					   (void *)eth_dev);
3206 
3207 reg_intr_cb_fail:
3208 	hinic_deinit_mac_addr(eth_dev);
3209 
3210 init_mac_fail:
3211 	eth_dev->dev_ops = NULL;
3212 	hinic_nic_dev_destroy(eth_dev);
3213 
3214 create_nic_dev_fail:
3215 	rte_free(nic_dev->mc_list);
3216 	nic_dev->mc_list = NULL;
3217 
3218 mc_addr_fail:
3219 	rte_free(eth_addr);
3220 	eth_dev->data->mac_addrs = NULL;
3221 
3222 eth_addr_fail:
3223 	PMD_DRV_LOG(ERR, "Initialization of %s in primary process failed",
3224 		    eth_dev->data->name);
3225 	return rc;
3226 }
3227 
3228 static int hinic_dev_init(struct rte_eth_dev *eth_dev)
3229 {
3230 	struct rte_pci_device *pci_dev;
3231 
3232 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3233 
3234 	PMD_DRV_LOG(INFO, "Initializing pf hinic-%.4x:%.2x:%.2x.%x in %s process",
3235 		    pci_dev->addr.domain, pci_dev->addr.bus,
3236 		    pci_dev->addr.devid, pci_dev->addr.function,
3237 		    (rte_eal_process_type() == RTE_PROC_PRIMARY) ?
3238 		    "primary" : "secondary");
3239 
3240 	/* rte_eth_dev rx_burst and tx_burst */
3241 	eth_dev->rx_pkt_burst = hinic_recv_pkts;
3242 	eth_dev->tx_pkt_burst = hinic_xmit_pkts;
3243 
3244 	return hinic_func_init(eth_dev);
3245 }
3246 
3247 static int hinic_dev_uninit(struct rte_eth_dev *dev)
3248 {
3249 	struct hinic_nic_dev *nic_dev;
3250 
3251 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3252 	rte_bit_relaxed_clear32(HINIC_DEV_INIT, &nic_dev->dev_status);
3253 
3254 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3255 		return 0;
3256 
3257 	hinic_mutex_destroy(&nic_dev->rx_mode_mutex);
3258 
3259 	hinic_dev_close(dev);
3260 
3261 	rte_free(nic_dev->mc_list);
3262 
3263 	return HINIC_OK;
3264 }
3265 
3266 static struct rte_pci_id pci_id_hinic_map[] = {
3267 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_PRD) },
3268 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_25GE) },
3269 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_100GE) },
3270 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF) },
3271 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF_HV) },
3272 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_DUAL_25GE) },
3273 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_100GE) },
3274 	{.vendor_id = 0},
3275 };
3276 
3277 static int hinic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3278 			   struct rte_pci_device *pci_dev)
3279 {
3280 	return rte_eth_dev_pci_generic_probe(pci_dev,
3281 		sizeof(struct hinic_nic_dev), hinic_dev_init);
3282 }
3283 
3284 static int hinic_pci_remove(struct rte_pci_device *pci_dev)
3285 {
3286 	return rte_eth_dev_pci_generic_remove(pci_dev, hinic_dev_uninit);
3287 }
3288 
3289 static struct rte_pci_driver rte_hinic_pmd = {
3290 	.id_table = pci_id_hinic_map,
3291 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3292 	.probe = hinic_pci_probe,
3293 	.remove = hinic_pci_remove,
3294 };
3295 
3296 RTE_PMD_REGISTER_PCI(net_hinic, rte_hinic_pmd);
3297 RTE_PMD_REGISTER_PCI_TABLE(net_hinic, pci_id_hinic_map);
3298 RTE_LOG_REGISTER(hinic_logtype, pmd.net.hinic, INFO);
3299