xref: /dpdk/drivers/net/hinic/hinic_pmd_ethdev.c (revision 03ab51eafda992874a48c392ca66ffb577fe2b71)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Huawei Technologies Co., Ltd
3  */
4 
5 #include <rte_pci.h>
6 #include <rte_bus_pci.h>
7 #include <ethdev_pci.h>
8 #include <rte_mbuf.h>
9 #include <rte_malloc.h>
10 #include <rte_memcpy.h>
11 #include <rte_mempool.h>
12 #include <rte_errno.h>
13 #include <rte_ether.h>
14 
15 #include "base/hinic_compat.h"
16 #include "base/hinic_pmd_hwdev.h"
17 #include "base/hinic_pmd_hwif.h"
18 #include "base/hinic_pmd_wq.h"
19 #include "base/hinic_pmd_cfg.h"
20 #include "base/hinic_pmd_mgmt.h"
21 #include "base/hinic_pmd_cmdq.h"
22 #include "base/hinic_pmd_niccfg.h"
23 #include "base/hinic_pmd_nicio.h"
24 #include "base/hinic_pmd_mbox.h"
25 #include "hinic_pmd_ethdev.h"
26 #include "hinic_pmd_tx.h"
27 #include "hinic_pmd_rx.h"
28 
29 /* Vendor ID used by Huawei devices */
30 #define HINIC_HUAWEI_VENDOR_ID		0x19E5
31 
32 /* Hinic devices */
33 #define HINIC_DEV_ID_PRD		0x1822
34 #define HINIC_DEV_ID_VF			0x375E
35 #define HINIC_DEV_ID_VF_HV		0x379E
36 
37 /* Mezz card for Blade Server */
38 #define HINIC_DEV_ID_MEZZ_25GE		0x0210
39 #define HINIC_DEV_ID_MEZZ_100GE		0x0205
40 
41 /* 2*25G and 2*100G card */
42 #define HINIC_DEV_ID_1822_DUAL_25GE	0x0206
43 #define HINIC_DEV_ID_1822_100GE		0x0200
44 
45 #define HINIC_SERVICE_MODE_NIC		2
46 
47 #define HINIC_INTR_CB_UNREG_MAX_RETRIES	10
48 
49 #define DEFAULT_BASE_COS		4
50 #define NR_MAX_COS			8
51 
52 #define HINIC_MIN_RX_BUF_SIZE		1024
53 #define HINIC_MAX_UC_MAC_ADDRS		128
54 #define HINIC_MAX_MC_MAC_ADDRS		2048
55 
56 #define HINIC_DEFAULT_BURST_SIZE	32
57 #define HINIC_DEFAULT_NB_QUEUES		1
58 #define HINIC_DEFAULT_RING_SIZE		1024
59 #define HINIC_MAX_LRO_SIZE		65536
60 
61 /*
62  * vlan_id is a 12 bit number.
63  * The VFTA array is actually a 4096-bit array, i.e. 128 32-bit elements.
64  * 2^5 = 32. The lower 5 bits select the bit within a 32-bit element.
65  * The upper 7 bits select the VFTA array index.
66  */
67 #define HINIC_VFTA_BIT(vlan_id)    (1 << ((vlan_id) & 0x1F))
68 #define HINIC_VFTA_IDX(vlan_id)    ((vlan_id) >> 5)
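/*
 * Illustrative example (not part of the original source): vlan_id 1000
 * (0x3E8) gives HINIC_VFTA_IDX(1000) = 31 and HINIC_VFTA_BIT(1000) = 1 << 8,
 * i.e. bit 8 of vfta[31].
 */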
69 
70 #define HINIC_VLAN_FILTER_EN		(1U << 0)
71 
72 /* lro number limit for one packet */
73 #define HINIC_LRO_WQE_NUM_DEFAULT	8
74 
75 struct hinic_xstats_name_off {
76 	char name[RTE_ETH_XSTATS_NAME_SIZE];
77 	u32  offset;
78 };
79 
80 #define HINIC_FUNC_STAT(_stat_item) {	\
81 	.name = #_stat_item, \
82 	.offset = offsetof(struct hinic_vport_stats, _stat_item) \
83 }
84 
85 #define HINIC_PORT_STAT(_stat_item) { \
86 	.name = #_stat_item, \
87 	.offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
88 }
89 
90 static const struct hinic_xstats_name_off hinic_vport_stats_strings[] = {
91 	HINIC_FUNC_STAT(tx_unicast_pkts_vport),
92 	HINIC_FUNC_STAT(tx_unicast_bytes_vport),
93 	HINIC_FUNC_STAT(tx_multicast_pkts_vport),
94 	HINIC_FUNC_STAT(tx_multicast_bytes_vport),
95 	HINIC_FUNC_STAT(tx_broadcast_pkts_vport),
96 	HINIC_FUNC_STAT(tx_broadcast_bytes_vport),
97 
98 	HINIC_FUNC_STAT(rx_unicast_pkts_vport),
99 	HINIC_FUNC_STAT(rx_unicast_bytes_vport),
100 	HINIC_FUNC_STAT(rx_multicast_pkts_vport),
101 	HINIC_FUNC_STAT(rx_multicast_bytes_vport),
102 	HINIC_FUNC_STAT(rx_broadcast_pkts_vport),
103 	HINIC_FUNC_STAT(rx_broadcast_bytes_vport),
104 
105 	HINIC_FUNC_STAT(tx_discard_vport),
106 	HINIC_FUNC_STAT(rx_discard_vport),
107 	HINIC_FUNC_STAT(tx_err_vport),
108 	HINIC_FUNC_STAT(rx_err_vport),
109 };
110 
111 #define HINIC_VPORT_XSTATS_NUM (sizeof(hinic_vport_stats_strings) / \
112 		sizeof(hinic_vport_stats_strings[0]))
113 
114 static const struct hinic_xstats_name_off hinic_phyport_stats_strings[] = {
115 	HINIC_PORT_STAT(mac_rx_total_pkt_num),
116 	HINIC_PORT_STAT(mac_rx_total_oct_num),
117 	HINIC_PORT_STAT(mac_rx_bad_pkt_num),
118 	HINIC_PORT_STAT(mac_rx_bad_oct_num),
119 	HINIC_PORT_STAT(mac_rx_good_pkt_num),
120 	HINIC_PORT_STAT(mac_rx_good_oct_num),
121 	HINIC_PORT_STAT(mac_rx_uni_pkt_num),
122 	HINIC_PORT_STAT(mac_rx_multi_pkt_num),
123 	HINIC_PORT_STAT(mac_rx_broad_pkt_num),
124 	HINIC_PORT_STAT(mac_tx_total_pkt_num),
125 	HINIC_PORT_STAT(mac_tx_total_oct_num),
126 	HINIC_PORT_STAT(mac_tx_bad_pkt_num),
127 	HINIC_PORT_STAT(mac_tx_bad_oct_num),
128 	HINIC_PORT_STAT(mac_tx_good_pkt_num),
129 	HINIC_PORT_STAT(mac_tx_good_oct_num),
130 	HINIC_PORT_STAT(mac_tx_uni_pkt_num),
131 	HINIC_PORT_STAT(mac_tx_multi_pkt_num),
132 	HINIC_PORT_STAT(mac_tx_broad_pkt_num),
133 	HINIC_PORT_STAT(mac_rx_fragment_pkt_num),
134 	HINIC_PORT_STAT(mac_rx_undersize_pkt_num),
135 	HINIC_PORT_STAT(mac_rx_undermin_pkt_num),
136 	HINIC_PORT_STAT(mac_rx_64_oct_pkt_num),
137 	HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num),
138 	HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num),
139 	HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num),
140 	HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num),
141 	HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num),
142 	HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num),
143 	HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num),
144 	HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num),
145 	HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num),
146 	HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num),
147 	HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num),
148 	HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num),
149 	HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num),
150 	HINIC_PORT_STAT(mac_rx_oversize_pkt_num),
151 	HINIC_PORT_STAT(mac_rx_jabber_pkt_num),
152 	HINIC_PORT_STAT(mac_rx_mac_pause_num),
153 	HINIC_PORT_STAT(mac_rx_pfc_pkt_num),
154 	HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num),
155 	HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num),
156 	HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num),
157 	HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num),
158 	HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num),
159 	HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num),
160 	HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num),
161 	HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num),
162 	HINIC_PORT_STAT(mac_rx_mac_control_pkt_num),
163 	HINIC_PORT_STAT(mac_rx_sym_err_pkt_num),
164 	HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num),
165 	HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num),
166 	HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num),
167 	HINIC_PORT_STAT(mac_tx_fragment_pkt_num),
168 	HINIC_PORT_STAT(mac_tx_undersize_pkt_num),
169 	HINIC_PORT_STAT(mac_tx_undermin_pkt_num),
170 	HINIC_PORT_STAT(mac_tx_64_oct_pkt_num),
171 	HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num),
172 	HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num),
173 	HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num),
174 	HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num),
175 	HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num),
176 	HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num),
177 	HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num),
178 	HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num),
179 	HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num),
180 	HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num),
181 	HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num),
182 	HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num),
183 	HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num),
184 	HINIC_PORT_STAT(mac_tx_oversize_pkt_num),
185 	HINIC_PORT_STAT(mac_trans_jabber_pkt_num),
186 	HINIC_PORT_STAT(mac_tx_mac_pause_num),
187 	HINIC_PORT_STAT(mac_tx_pfc_pkt_num),
188 	HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num),
189 	HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num),
190 	HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num),
191 	HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num),
192 	HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num),
193 	HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num),
194 	HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num),
195 	HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num),
196 	HINIC_PORT_STAT(mac_tx_mac_control_pkt_num),
197 	HINIC_PORT_STAT(mac_tx_err_all_pkt_num),
198 	HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num),
199 	HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num),
200 };
201 
202 #define HINIC_PHYPORT_XSTATS_NUM (sizeof(hinic_phyport_stats_strings) / \
203 		sizeof(hinic_phyport_stats_strings[0]))
204 
205 static const struct hinic_xstats_name_off hinic_rxq_stats_strings[] = {
206 	{"rx_nombuf", offsetof(struct hinic_rxq_stats, rx_nombuf)},
207 	{"burst_pkt", offsetof(struct hinic_rxq_stats, burst_pkts)},
208 };
209 
210 #define HINIC_RXQ_XSTATS_NUM (sizeof(hinic_rxq_stats_strings) / \
211 		sizeof(hinic_rxq_stats_strings[0]))
212 
213 static const struct hinic_xstats_name_off hinic_txq_stats_strings[] = {
214 	{"tx_busy", offsetof(struct hinic_txq_stats, tx_busy)},
215 	{"offload_errors", offsetof(struct hinic_txq_stats, off_errs)},
216 	{"copy_pkts", offsetof(struct hinic_txq_stats, cpy_pkts)},
217 	{"rl_drop", offsetof(struct hinic_txq_stats, rl_drop)},
218 	{"burst_pkts", offsetof(struct hinic_txq_stats, burst_pkts)},
219 	{"sge_len0", offsetof(struct hinic_txq_stats, sge_len0)},
220 	{"mbuf_null", offsetof(struct hinic_txq_stats, mbuf_null)},
221 };
222 
223 #define HINIC_TXQ_XSTATS_NUM (sizeof(hinic_txq_stats_strings) / \
224 		sizeof(hinic_txq_stats_strings[0]))
225 
226 static int hinic_xstats_calc_num(struct hinic_nic_dev *nic_dev)
227 {
228 	if (HINIC_IS_VF(nic_dev->hwdev)) {
229 		return (HINIC_VPORT_XSTATS_NUM +
230 			HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq +
231 			HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq);
232 	} else {
233 		return (HINIC_VPORT_XSTATS_NUM +
234 			HINIC_PHYPORT_XSTATS_NUM +
235 			HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq +
236 			HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq);
237 	}
238 }
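/*
 * Worked example (illustrative, using the table sizes above): a PF with
 * 4 Rx and 4 Tx queues exposes 16 vport + 85 phy-port + 4 * 2 rxq +
 * 4 * 7 txq = 137 xstats; a VF drops the 85 phy-port counters.
 */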
239 
240 static const struct rte_eth_desc_lim hinic_rx_desc_lim = {
241 	.nb_max = HINIC_MAX_QUEUE_DEPTH,
242 	.nb_min = HINIC_MIN_QUEUE_DEPTH,
243 	.nb_align = HINIC_RXD_ALIGN,
244 };
245 
246 static const struct rte_eth_desc_lim hinic_tx_desc_lim = {
247 	.nb_max = HINIC_MAX_QUEUE_DEPTH,
248 	.nb_min = HINIC_MIN_QUEUE_DEPTH,
249 	.nb_align = HINIC_TXD_ALIGN,
250 };
251 
252 static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask);
253 
254 /**
255  * Interrupt handler triggered by the NIC for handling
256  * specific events.
257  *
258  * @param param The address of the parameter (struct rte_eth_dev *) registered before.
259  */
260 static void hinic_dev_interrupt_handler(void *param)
261 {
262 	struct rte_eth_dev *dev = param;
263 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
264 
265 	if (!rte_bit_relaxed_get32(HINIC_DEV_INTR_EN, &nic_dev->dev_status)) {
266 		PMD_DRV_LOG(WARNING, "Device's interrupt is disabled, ignore interrupt event, dev_name: %s, port_id: %d",
267 			    nic_dev->proc_dev_name, dev->data->port_id);
268 		return;
269 	}
270 
271 	/* aeq0 msg handler */
272 	hinic_dev_handle_aeq_event(nic_dev->hwdev, param);
273 }
274 
275 /**
276  * Ethernet device configuration.
277  *
278  * Prepare the driver for a given number of TX and RX queues, mtu size
279  * and configure RSS.
280  *
281  * @param dev
282  *   Pointer to Ethernet device structure.
283  *
284  * @return
285  *   0 on success, negative error value otherwise.
286  */
287 static int hinic_dev_configure(struct rte_eth_dev *dev)
288 {
289 	struct hinic_nic_dev *nic_dev;
290 	struct hinic_nic_io *nic_io;
291 	int err;
292 
293 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
294 	nic_io = nic_dev->hwdev->nic_io;
295 
296 	nic_dev->num_sq = dev->data->nb_tx_queues;
297 	nic_dev->num_rq = dev->data->nb_rx_queues;
298 
299 	nic_io->num_sqs = dev->data->nb_tx_queues;
300 	nic_io->num_rqs = dev->data->nb_rx_queues;
301 
302 	/* queue pair count is max(num_sq, num_rq) */
303 	nic_dev->num_qps = (nic_dev->num_sq > nic_dev->num_rq) ?
304 			nic_dev->num_sq : nic_dev->num_rq;
305 	nic_io->num_qps = nic_dev->num_qps;
306 
307 	if (nic_dev->num_qps > nic_io->max_qps) {
308 		PMD_DRV_LOG(ERR,
309 			"Queue number out of range, get queue_num:%d, max_queue_num:%d",
310 			nic_dev->num_qps, nic_io->max_qps);
311 		return -EINVAL;
312 	}
313 
314 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
315 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
316 
317 	/* mtu size is 256~9600 */
318 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len < HINIC_MIN_FRAME_SIZE ||
319 	    dev->data->dev_conf.rxmode.max_rx_pkt_len >
320 	    HINIC_MAX_JUMBO_FRAME_SIZE) {
321 		PMD_DRV_LOG(ERR,
322 			"Max rx pkt len out of range, get max_rx_pkt_len:%d, "
323 			"expect between %d and %d",
324 			dev->data->dev_conf.rxmode.max_rx_pkt_len,
325 			HINIC_MIN_FRAME_SIZE, HINIC_MAX_JUMBO_FRAME_SIZE);
326 		return -EINVAL;
327 	}
328 
329 	nic_dev->mtu_size =
330 		HINIC_PKTLEN_TO_MTU(dev->data->dev_conf.rxmode.max_rx_pkt_len);
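	/*
	 * Example (assuming HINIC_PKTLEN_TO_MTU subtracts the 18 bytes of
	 * Ethernet header + CRC): a max_rx_pkt_len of 1518 yields an MTU
	 * of 1500.
	 */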
331 
332 	/* rss template */
333 	err = hinic_config_mq_mode(dev, TRUE);
334 	if (err) {
335 		PMD_DRV_LOG(ERR, "Config multi-queue failed");
336 		return err;
337 	}
338 
339 	/* init vlan offload */
340 	err = hinic_vlan_offload_set(dev,
341 				ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
342 	if (err) {
343 		PMD_DRV_LOG(ERR, "Initialize vlan filter and strip failed");
344 		(void)hinic_config_mq_mode(dev, FALSE);
345 		return err;
346 	}
347 
348 	/* clear fdir filter flag in function table */
349 	hinic_free_fdir_filter(nic_dev);
350 
351 	return HINIC_OK;
352 }
353 
354 /**
355  * DPDK callback to create the receive queue.
356  *
357  * @param dev
358  *   Pointer to Ethernet device structure.
359  * @param queue_idx
360  *   RX queue index.
361  * @param nb_desc
362  *   Number of descriptors for receive queue.
363  * @param socket_id
364  *   NUMA socket on which memory must be allocated.
365  * @param rx_conf
366  *   Thresholds parameters (only rx_free_thresh is used).
367  * @param mp
368  *   Memory pool for buffer allocations.
369  *
370  * @return
371  *   0 on success, negative error value otherwise.
372  */
373 static int hinic_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
374 			 uint16_t nb_desc, unsigned int socket_id,
375 			 __rte_unused const struct rte_eth_rxconf *rx_conf,
376 			 struct rte_mempool *mp)
377 {
378 	int rc;
379 	struct hinic_nic_dev *nic_dev;
380 	struct hinic_hwdev *hwdev;
381 	struct hinic_rxq *rxq;
382 	u16 rq_depth, rx_free_thresh;
383 	u32 buf_size;
384 
385 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
386 	hwdev = nic_dev->hwdev;
387 
388 	/* queue depth must be a power of 2, otherwise it is rounded up to one */
389 	rq_depth = (nb_desc & (nb_desc - 1)) ?
390 		((u16)(1U << (ilog2(nb_desc) + 1))) : nb_desc;
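	/*
	 * e.g. (illustrative) nb_desc = 1000 is not a power of 2, so rq_depth
	 * becomes 1U << (ilog2(1000) + 1) = 1024, while nb_desc = 512 is
	 * kept as-is.
	 */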
391 
392 	/*
393 	 * Validate number of receive descriptors.
394 	 * It must not exceed hardware maximum and minimum.
395 	 */
396 	if (rq_depth > HINIC_MAX_QUEUE_DEPTH ||
397 		rq_depth < HINIC_MIN_QUEUE_DEPTH) {
398 		PMD_DRV_LOG(ERR, "RX queue depth is out of range from %d to %d, (nb_desc=%d, q_depth=%d, port=%d queue=%d)",
399 			    HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH,
400 			    (int)nb_desc, (int)rq_depth,
401 			    (int)dev->data->port_id, (int)queue_idx);
402 		return -EINVAL;
403 	}
404 
405 	/*
406 	 * The RX descriptor ring will be cleaned after rxq->rx_free_thresh
407 	 * descriptors are used or if the number of descriptors required
408 	 * to receive a packet is greater than the number of free RX
409 	 * descriptors.
410 	 * The following constraints must be satisfied:
411 	 *  rx_free_thresh must be greater than 0.
412 	 *  rx_free_thresh must be less than the size of the ring minus 1.
413 	 * When set to zero use default values.
414 	 */
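	/*
	 * e.g. (illustrative) with rq_depth = 1024, any explicit
	 * rx_free_thresh from 1 to 1022 is accepted, while 0 selects
	 * HINIC_DEFAULT_RX_FREE_THRESH.
	 */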
415 	rx_free_thresh = (u16)((rx_conf->rx_free_thresh) ?
416 			rx_conf->rx_free_thresh : HINIC_DEFAULT_RX_FREE_THRESH);
417 	if (rx_free_thresh >= (rq_depth - 1)) {
418 		PMD_DRV_LOG(ERR, "rx_free_thresh must be less than the number of RX descriptors minus 1. (rx_free_thresh=%u port=%d queue=%d)",
419 			    (unsigned int)rx_free_thresh,
420 			    (int)dev->data->port_id,
421 			    (int)queue_idx);
422 		return -EINVAL;
423 	}
424 
425 	rxq = rte_zmalloc_socket("hinic_rx_queue", sizeof(struct hinic_rxq),
426 				 RTE_CACHE_LINE_SIZE, socket_id);
427 	if (!rxq) {
428 		PMD_DRV_LOG(ERR, "Allocate rxq[%d] failed, dev_name: %s",
429 			    queue_idx, dev->data->name);
430 		return -ENOMEM;
431 	}
432 	nic_dev->rxqs[queue_idx] = rxq;
433 
434 	/* alloc rx rq hw wqe page */
435 	rc = hinic_create_rq(hwdev, queue_idx, rq_depth, socket_id);
436 	if (rc) {
437 		PMD_DRV_LOG(ERR, "Create rxq[%d] failed, dev_name: %s, rq_depth: %d",
438 			    queue_idx, dev->data->name, rq_depth);
439 		goto create_rq_fail;
440 	}
441 
442 	/* mbuf pool must be assigned before setup rx resources */
443 	rxq->mb_pool = mp;
444 
445 	rc =
446 	hinic_convert_rx_buf_size(rte_pktmbuf_data_room_size(rxq->mb_pool) -
447 				  RTE_PKTMBUF_HEADROOM, &buf_size);
448 	if (rc) {
449 		PMD_DRV_LOG(ERR, "Adjust buf size failed, dev_name: %s",
450 			    dev->data->name);
451 		goto adjust_bufsize_fail;
452 	}
453 
454 	/* rx queue info, rearm control */
455 	rxq->wq = &hwdev->nic_io->rq_wq[queue_idx];
456 	rxq->pi_virt_addr = hwdev->nic_io->qps[queue_idx].rq.pi_virt_addr;
457 	rxq->nic_dev = nic_dev;
458 	rxq->q_id = queue_idx;
459 	rxq->q_depth = rq_depth;
460 	rxq->buf_len = (u16)buf_size;
461 	rxq->rx_free_thresh = rx_free_thresh;
462 	rxq->socket_id = socket_id;
463 
464 	/* past this index the ring can't do mbuf rearm in bulk */
465 	rxq->rxinfo_align_end = rxq->q_depth - rxq->rx_free_thresh;
466 
467 	/* device port identifier */
468 	rxq->port_id = dev->data->port_id;
469 
470 	/* alloc rx_cqe and prepare rq_wqe */
471 	rc = hinic_setup_rx_resources(rxq);
472 	if (rc) {
473 		PMD_DRV_LOG(ERR, "Setup rxq[%d] rx_resources failed, dev_name: %s",
474 			    queue_idx, dev->data->name);
475 		goto setup_rx_res_err;
476 	}
477 
478 	/* record nic_dev rxq in rte_eth rx_queues */
479 	dev->data->rx_queues[queue_idx] = rxq;
480 
481 	return 0;
482 
483 setup_rx_res_err:
484 adjust_bufsize_fail:
485 	hinic_destroy_rq(hwdev, queue_idx);
486 
487 create_rq_fail:
488 	rte_free(rxq);
489 
490 	return rc;
491 }
492 
493 static void hinic_reset_rx_queue(struct rte_eth_dev *dev)
494 {
495 	struct hinic_rxq *rxq;
496 	struct hinic_nic_dev *nic_dev;
497 	int q_id = 0;
498 
499 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
500 
501 	for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
502 		rxq = dev->data->rx_queues[q_id];
503 
504 		rxq->wq->cons_idx = 0;
505 		rxq->wq->prod_idx = 0;
506 		rxq->wq->delta = rxq->q_depth;
507 		rxq->wq->mask = rxq->q_depth - 1;
508 
509 		/* alloc mbuf to rq */
510 		hinic_rx_alloc_pkts(rxq);
511 	}
512 }
513 
514 /**
515  * DPDK callback to configure the transmit queue.
516  *
517  * @param dev
518  *   Pointer to Ethernet device structure.
519  * @param queue_idx
520  *   Transmit queue index.
521  * @param nb_desc
522  *   Number of descriptors for transmit queue.
523  * @param socket_id
524  *   NUMA socket on which memory must be allocated.
525  * @param tx_conf
526  *   Tx queue configuration parameters.
527  *
528  * @return
529  *   0 on success, negative error value otherwise.
530  */
531 static int hinic_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
532 			 uint16_t nb_desc, unsigned int socket_id,
533 			 __rte_unused const struct rte_eth_txconf *tx_conf)
534 {
535 	int rc;
536 	struct hinic_nic_dev *nic_dev;
537 	struct hinic_hwdev *hwdev;
538 	struct hinic_txq *txq;
539 	u16 sq_depth, tx_free_thresh;
540 
541 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
542 	hwdev = nic_dev->hwdev;
543 
544 	/* queue depth must be a power of 2, otherwise it is rounded up to one */
545 	sq_depth = (nb_desc & (nb_desc - 1)) ?
546 			((u16)(1U << (ilog2(nb_desc) + 1))) : nb_desc;
547 
548 	/*
549 	 * Validate number of transmit descriptors.
550 	 * It must not exceed hardware maximum and minimum.
551 	 */
552 	if (sq_depth > HINIC_MAX_QUEUE_DEPTH ||
553 		sq_depth < HINIC_MIN_QUEUE_DEPTH) {
554 		PMD_DRV_LOG(ERR, "TX queue depth is out of range from %d to %d, (nb_desc=%d, q_depth=%d, port=%d queue=%d)",
555 			  HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH,
556 			  (int)nb_desc, (int)sq_depth,
557 			  (int)dev->data->port_id, (int)queue_idx);
558 		return -EINVAL;
559 	}
560 
561 	/*
562 	 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
563 	 * descriptors are used or if the number of descriptors required
564 	 * to transmit a packet is greater than the number of free TX
565 	 * descriptors.
566 	 * The following constraints must be satisfied:
567 	 *  tx_free_thresh must be greater than 0.
568 	 *  tx_free_thresh must be less than the size of the ring minus 1.
569 	 * When set to zero use default values.
570 	 */
571 	tx_free_thresh = (u16)((tx_conf->tx_free_thresh) ?
572 			tx_conf->tx_free_thresh : HINIC_DEFAULT_TX_FREE_THRESH);
573 	if (tx_free_thresh >= (sq_depth - 1)) {
574 		PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the number of TX descriptors minus 1. (tx_free_thresh=%u port=%d queue=%d)",
575 			(unsigned int)tx_free_thresh, (int)dev->data->port_id,
576 			(int)queue_idx);
577 		return -EINVAL;
578 	}
579 
580 	txq = rte_zmalloc_socket("hinic_tx_queue", sizeof(struct hinic_txq),
581 				 RTE_CACHE_LINE_SIZE, socket_id);
582 	if (!txq) {
583 		PMD_DRV_LOG(ERR, "Allocate txq[%d] failed, dev_name: %s",
584 			    queue_idx, dev->data->name);
585 		return -ENOMEM;
586 	}
587 	nic_dev->txqs[queue_idx] = txq;
588 
589 	/* alloc tx sq hw wqe page */
590 	rc = hinic_create_sq(hwdev, queue_idx, sq_depth, socket_id);
591 	if (rc) {
592 		PMD_DRV_LOG(ERR, "Create txq[%d] failed, dev_name: %s, sq_depth: %d",
593 			    queue_idx, dev->data->name, sq_depth);
594 		goto create_sq_fail;
595 	}
596 
597 	txq->q_id = queue_idx;
598 	txq->q_depth = sq_depth;
599 	txq->port_id = dev->data->port_id;
600 	txq->tx_free_thresh = tx_free_thresh;
601 	txq->nic_dev = nic_dev;
602 	txq->wq = &hwdev->nic_io->sq_wq[queue_idx];
603 	txq->sq = &hwdev->nic_io->qps[queue_idx].sq;
604 	txq->cons_idx_addr = hwdev->nic_io->qps[queue_idx].sq.cons_idx_addr;
605 	txq->sq_head_addr = HINIC_GET_WQ_HEAD(txq);
606 	txq->sq_bot_sge_addr = HINIC_GET_WQ_TAIL(txq) -
607 					sizeof(struct hinic_sq_bufdesc);
608 	txq->cos = nic_dev->default_cos;
609 	txq->socket_id = socket_id;
610 
611 	/* alloc software txinfo */
612 	rc = hinic_setup_tx_resources(txq);
613 	if (rc) {
614 		PMD_DRV_LOG(ERR, "Setup txq[%d] tx_resources failed, dev_name: %s",
615 			    queue_idx, dev->data->name);
616 		goto setup_tx_res_fail;
617 	}
618 
619 	/* record nic_dev txq in rte_eth tx_queues */
620 	dev->data->tx_queues[queue_idx] = txq;
621 
622 	return HINIC_OK;
623 
624 setup_tx_res_fail:
625 	hinic_destroy_sq(hwdev, queue_idx);
626 
627 create_sq_fail:
628 	rte_free(txq);
629 
630 	return rc;
631 }
632 
633 static void hinic_reset_tx_queue(struct rte_eth_dev *dev)
634 {
635 	struct hinic_nic_dev *nic_dev;
636 	struct hinic_txq *txq;
637 	struct hinic_nic_io *nic_io;
638 	struct hinic_hwdev *hwdev;
639 	volatile u32 *ci_addr;
640 	int q_id = 0;
641 
642 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
643 	hwdev = nic_dev->hwdev;
644 	nic_io = hwdev->nic_io;
645 
646 	for (q_id = 0; q_id < nic_dev->num_sq; q_id++) {
647 		txq = dev->data->tx_queues[q_id];
648 
649 		txq->wq->cons_idx = 0;
650 		txq->wq->prod_idx = 0;
651 		txq->wq->delta = txq->q_depth;
652 		txq->wq->mask  = txq->q_depth - 1;
653 
654 		/* clear hardware ci */
655 		ci_addr = (volatile u32 *)HINIC_CI_VADDR(nic_io->ci_vaddr_base,
656 							q_id);
657 		*ci_addr = 0;
658 	}
659 }
660 
661 /**
662  * Get the link speed capabilities from the NIC.
663  *
664  * @param dev
665  *   Pointer to Ethernet device structure.
666  * @param speed_capa
667  *   Pointer to the speed capability bitmap to fill.
668  */
669 static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa)
670 {
671 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
672 	u32 supported_link, advertised_link;
673 	int err;
674 
675 #define HINIC_LINK_MODE_SUPPORT_1G	(1U << HINIC_GE_BASE_KX)
676 
677 #define HINIC_LINK_MODE_SUPPORT_10G	(1U << HINIC_10GE_BASE_KR)
678 
679 #define HINIC_LINK_MODE_SUPPORT_25G	((1U << HINIC_25GE_BASE_KR_S) | \
680 					(1U << HINIC_25GE_BASE_CR_S) | \
681 					(1U << HINIC_25GE_BASE_KR) | \
682 					(1U << HINIC_25GE_BASE_CR))
683 
684 #define HINIC_LINK_MODE_SUPPORT_40G	((1U << HINIC_40GE_BASE_KR4) | \
685 					(1U << HINIC_40GE_BASE_CR4))
686 
687 #define HINIC_LINK_MODE_SUPPORT_100G	((1U << HINIC_100GE_BASE_KR4) | \
688 					(1U << HINIC_100GE_BASE_CR4))
689 
690 	err = hinic_get_link_mode(nic_dev->hwdev,
691 				  &supported_link, &advertised_link);
692 	if (err || supported_link == HINIC_SUPPORTED_UNKNOWN ||
693 	    advertised_link == HINIC_SUPPORTED_UNKNOWN) {
694 		PMD_DRV_LOG(WARNING, "Get speed capability info failed, device: %s, port_id: %u",
695 			  nic_dev->proc_dev_name, dev->data->port_id);
696 	} else {
697 		*speed_capa = 0;
698 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_1G))
699 			*speed_capa |= ETH_LINK_SPEED_1G;
700 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_10G))
701 			*speed_capa |= ETH_LINK_SPEED_10G;
702 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_25G))
703 			*speed_capa |= ETH_LINK_SPEED_25G;
704 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_40G))
705 			*speed_capa |= ETH_LINK_SPEED_40G;
706 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_100G))
707 			*speed_capa |= ETH_LINK_SPEED_100G;
708 	}
709 }
710 
711 /**
712  * DPDK callback to get information about the device.
713  *
714  * @param dev
715  *   Pointer to Ethernet device structure.
716  * @param info
717  *   Pointer to Info structure output buffer.
718  */
719 static int
720 hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
721 {
722 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
723 
724 	info->max_rx_queues  = nic_dev->nic_cap.max_rqs;
725 	info->max_tx_queues  = nic_dev->nic_cap.max_sqs;
726 	info->min_rx_bufsize = HINIC_MIN_RX_BUF_SIZE;
727 	info->max_rx_pktlen  = HINIC_MAX_JUMBO_FRAME_SIZE;
728 	info->max_mac_addrs  = HINIC_MAX_UC_MAC_ADDRS;
729 	info->min_mtu = HINIC_MIN_MTU_SIZE;
730 	info->max_mtu = HINIC_MAX_MTU_SIZE;
731 	info->max_lro_pkt_size = HINIC_MAX_LRO_SIZE;
732 
733 	hinic_get_speed_capa(dev, &info->speed_capa);
734 	info->rx_queue_offload_capa = 0;
735 	info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
736 				DEV_RX_OFFLOAD_IPV4_CKSUM |
737 				DEV_RX_OFFLOAD_UDP_CKSUM |
738 				DEV_RX_OFFLOAD_TCP_CKSUM |
739 				DEV_RX_OFFLOAD_VLAN_FILTER |
740 				DEV_RX_OFFLOAD_SCATTER |
741 				DEV_RX_OFFLOAD_JUMBO_FRAME |
742 				DEV_RX_OFFLOAD_TCP_LRO |
743 				DEV_RX_OFFLOAD_RSS_HASH;
744 
745 	info->tx_queue_offload_capa = 0;
746 	info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
747 				DEV_TX_OFFLOAD_IPV4_CKSUM |
748 				DEV_TX_OFFLOAD_UDP_CKSUM |
749 				DEV_TX_OFFLOAD_TCP_CKSUM |
750 				DEV_TX_OFFLOAD_SCTP_CKSUM |
751 				DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
752 				DEV_TX_OFFLOAD_TCP_TSO |
753 				DEV_TX_OFFLOAD_MULTI_SEGS;
754 
755 	info->hash_key_size = HINIC_RSS_KEY_SIZE;
756 	info->reta_size = HINIC_RSS_INDIR_SIZE;
757 	info->flow_type_rss_offloads = HINIC_RSS_OFFLOAD_ALL;
758 	info->rx_desc_lim = hinic_rx_desc_lim;
759 	info->tx_desc_lim = hinic_tx_desc_lim;
760 
761 	/* Driver-preferred Rx/Tx parameters */
762 	info->default_rxportconf.burst_size = HINIC_DEFAULT_BURST_SIZE;
763 	info->default_txportconf.burst_size = HINIC_DEFAULT_BURST_SIZE;
764 	info->default_rxportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES;
765 	info->default_txportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES;
766 	info->default_rxportconf.ring_size = HINIC_DEFAULT_RING_SIZE;
767 	info->default_txportconf.ring_size = HINIC_DEFAULT_RING_SIZE;
768 
769 	return 0;
770 }
771 
772 static int hinic_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
773 				size_t fw_size)
774 {
775 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
776 	char fw_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0};
777 	int err;
778 
779 	err = hinic_get_mgmt_version(nic_dev->hwdev, fw_ver);
780 	if (err) {
781 		PMD_DRV_LOG(ERR, "Failed to get fw version");
782 		return -EINVAL;
783 	}
784 
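	/*
	 * Per the ethdev fw_version_get convention, return the required
	 * buffer length (including the terminating '\0') when the caller's
	 * buffer is too small.
	 */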
785 	if (fw_size < strlen(fw_ver) + 1)
786 		return (strlen(fw_ver) + 1);
787 
788 	snprintf(fw_version, fw_size, "%s", fw_ver);
789 
790 	return 0;
791 }
792 
793 static int hinic_config_rx_mode(struct hinic_nic_dev *nic_dev, u32 rx_mode_ctrl)
794 {
795 	int err;
796 
797 	err = hinic_set_rx_mode(nic_dev->hwdev, rx_mode_ctrl);
798 	if (err) {
799 		PMD_DRV_LOG(ERR, "Failed to set rx mode");
800 		return -EINVAL;
801 	}
802 	nic_dev->rx_mode_status = rx_mode_ctrl;
803 
804 	return 0;
805 }
806 
807 static int hinic_rxtx_configure(struct rte_eth_dev *dev)
808 {
809 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
810 	int err;
811 
812 	/* rx configure; if rss is enabled, init the default configuration */
813 	err = hinic_rx_configure(dev);
814 	if (err) {
815 		PMD_DRV_LOG(ERR, "Configure rss failed");
816 		return err;
817 	}
818 
819 	/* rx mode init */
820 	err = hinic_config_rx_mode(nic_dev, HINIC_DEFAULT_RX_MODE);
821 	if (err) {
822 		PMD_DRV_LOG(ERR, "Configure rx_mode:0x%x failed",
823 			HINIC_DEFAULT_RX_MODE);
824 		goto set_rx_mode_fail;
825 	}
826 
827 	return HINIC_OK;
828 
829 set_rx_mode_fail:
830 	hinic_rx_remove_configure(dev);
831 
832 	return err;
833 }
834 
835 static void hinic_remove_rxtx_configure(struct rte_eth_dev *dev)
836 {
837 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
838 
839 	(void)hinic_config_rx_mode(nic_dev, 0);
840 	hinic_rx_remove_configure(dev);
841 }
842 
843 static int hinic_priv_get_dev_link_status(struct hinic_nic_dev *nic_dev,
844 					  struct rte_eth_link *link)
845 {
846 	int rc;
847 	u8 port_link_status = 0;
848 	struct nic_port_info port_link_info;
849 	struct hinic_hwdev *nic_hwdev = nic_dev->hwdev;
850 	uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
851 					ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
852 					ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
853 					ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
854 
855 	rc = hinic_get_link_status(nic_hwdev, &port_link_status);
856 	if (rc)
857 		return rc;
858 
859 	if (!port_link_status) {
860 		link->link_status = ETH_LINK_DOWN;
861 		link->link_speed = 0;
862 		link->link_duplex = ETH_LINK_HALF_DUPLEX;
863 		link->link_autoneg = ETH_LINK_FIXED;
864 		return HINIC_OK;
865 	}
866 
867 	memset(&port_link_info, 0, sizeof(port_link_info));
868 	rc = hinic_get_port_info(nic_hwdev, &port_link_info);
869 	if (rc)
870 		return rc;
871 
872 	link->link_speed = port_speed[port_link_info.speed % LINK_SPEED_MAX];
873 	link->link_duplex = port_link_info.duplex;
874 	link->link_autoneg = port_link_info.autoneg_state;
875 	link->link_status = port_link_status;
876 
877 	return HINIC_OK;
878 }
879 
880 /**
881  * DPDK callback to retrieve physical link information.
882  *
883  * @param dev
884  *   Pointer to Ethernet device structure.
885  * @param wait_to_complete
886  *   Wait for request completion.
887  *
888  * @return
889  *   0 link status changed, -1 link status not changed
890  */
891 static int hinic_link_update(struct rte_eth_dev *dev, int wait_to_complete)
892 {
893 #define CHECK_INTERVAL 10  /* 10ms */
894 #define MAX_REPEAT_TIME 100  /* 1s (100 * 10ms) in total */
895 	int rc = HINIC_OK;
896 	struct rte_eth_link link;
897 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
898 	unsigned int rep_cnt = MAX_REPEAT_TIME;
899 
900 	memset(&link, 0, sizeof(link));
901 	do {
902 		/* Get link status information from hardware */
903 		rc = hinic_priv_get_dev_link_status(nic_dev, &link);
904 		if (rc != HINIC_OK) {
905 			link.link_speed = ETH_SPEED_NUM_NONE;
906 			link.link_duplex = ETH_LINK_FULL_DUPLEX;
907 			PMD_DRV_LOG(ERR, "Get link status failed");
908 			goto out;
909 		}
910 
911 		if (!wait_to_complete || link.link_status)
912 			break;
913 
914 		rte_delay_ms(CHECK_INTERVAL);
915 	} while (rep_cnt--);
916 
917 out:
918 	rc = rte_eth_linkstatus_set(dev, &link);
919 	return rc;
920 }
921 
922 /**
923  * DPDK callback to bring the link UP.
924  *
925  * @param dev
926  *   Pointer to Ethernet device structure.
927  *
928  * @return
929  *   0 on success, negative errno value on failure.
930  */
931 static int hinic_dev_set_link_up(struct rte_eth_dev *dev)
932 {
933 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
934 	int ret;
935 
936 	/* link status follows phy port status; bringing the link up opens the pma */
937 	ret = hinic_set_port_enable(nic_dev->hwdev, true);
938 	if (ret)
939 		PMD_DRV_LOG(ERR, "Set mac link up failed, dev_name: %s, port_id: %d",
940 			    nic_dev->proc_dev_name, dev->data->port_id);
941 
942 	return ret;
943 }
944 
945 /**
946  * DPDK callback to bring the link DOWN.
947  *
948  * @param dev
949  *   Pointer to Ethernet device structure.
950  *
951  * @return
952  *   0 on success, negative errno value on failure.
953  */
954 static int hinic_dev_set_link_down(struct rte_eth_dev *dev)
955 {
956 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
957 	int ret;
958 
959 	/* link status follows phy port status; bringing the link down closes the pma */
960 	ret = hinic_set_port_enable(nic_dev->hwdev, false);
961 	if (ret)
962 		PMD_DRV_LOG(ERR, "Set mac link down failed, dev_name: %s, port_id: %d",
963 			    nic_dev->proc_dev_name, dev->data->port_id);
964 
965 	return ret;
966 }
967 
968 /**
969  * DPDK callback to start the device.
970  *
971  * @param dev
972  *   Pointer to Ethernet device structure.
973  *
974  * @return
975  *   0 on success, negative errno value on failure.
976  */
977 static int hinic_dev_start(struct rte_eth_dev *dev)
978 {
979 	int rc;
980 	char *name;
981 	struct hinic_nic_dev *nic_dev;
982 
983 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
984 	name = dev->data->name;
985 
986 	/* reset rx and tx queue */
987 	hinic_reset_rx_queue(dev);
988 	hinic_reset_tx_queue(dev);
989 
990 	/* get func rx buf size */
991 	hinic_get_func_rx_buf_size(nic_dev);
992 
993 	/* init txq and rxq context */
994 	rc = hinic_init_qp_ctxts(nic_dev->hwdev);
995 	if (rc) {
996 		PMD_DRV_LOG(ERR, "Initialize qp context failed, dev_name: %s",
997 			    name);
998 		goto init_qp_fail;
999 	}
1000 
1001 	/* rss template */
1002 	rc = hinic_config_mq_mode(dev, TRUE);
1003 	if (rc) {
1004 		PMD_DRV_LOG(ERR, "Configure mq mode failed, dev_name: %s",
1005 			    name);
1006 		goto cfg_mq_mode_fail;
1007 	}
1008 
1009 	/* set default mtu */
1010 	rc = hinic_set_port_mtu(nic_dev->hwdev, nic_dev->mtu_size);
1011 	if (rc) {
1012 		PMD_DRV_LOG(ERR, "Set mtu_size[%d] failed, dev_name: %s",
1013 			    nic_dev->mtu_size, name);
1014 		goto set_mtu_fail;
1015 	}
1016 
1017 	/* configure rss, rx_mode and other default rx or tx features */
1018 	rc = hinic_rxtx_configure(dev);
1019 	if (rc) {
1020 		PMD_DRV_LOG(ERR, "Configure tx and rx failed, dev_name: %s",
1021 			    name);
1022 		goto cfg_rxtx_fail;
1023 	}
1024 
1025 	/* reactivate pf status so that the uP reports async events */
1026 	hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_ACTIVE_FLAG);
1027 
1028 	/* open virtual port and ready to start packet receiving */
1029 	rc = hinic_set_vport_enable(nic_dev->hwdev, true);
1030 	if (rc) {
1031 		PMD_DRV_LOG(ERR, "Enable vport failed, dev_name:%s", name);
1032 		goto en_vport_fail;
1033 	}
1034 
1035 	/* open physical port and start packet receiving */
1036 	rc = hinic_set_port_enable(nic_dev->hwdev, true);
1037 	if (rc) {
1038 		PMD_DRV_LOG(ERR, "Enable physical port failed, dev_name: %s",
1039 			    name);
1040 		goto en_port_fail;
1041 	}
1042 
1043 	/* update eth_dev link status */
1044 	if (dev->data->dev_conf.intr_conf.lsc != 0)
1045 		(void)hinic_link_update(dev, 0);
1046 
1047 	rte_bit_relaxed_set32(HINIC_DEV_START, &nic_dev->dev_status);
1048 
1049 	return 0;
1050 
1051 en_port_fail:
1052 	(void)hinic_set_vport_enable(nic_dev->hwdev, false);
1053 
1054 en_vport_fail:
1055 	hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_INIT);
1056 
1057 	/* Flush tx and rx chip resources in case the vport enable did not fully take effect */
1058 	(void)hinic_flush_qp_res(nic_dev->hwdev);
1059 	rte_delay_ms(100);
1060 
1061 	hinic_remove_rxtx_configure(dev);
1062 
1063 cfg_rxtx_fail:
1064 set_mtu_fail:
1065 cfg_mq_mode_fail:
1066 	hinic_free_qp_ctxts(nic_dev->hwdev);
1067 
1068 init_qp_fail:
1069 	hinic_free_all_rx_mbuf(dev);
1070 	hinic_free_all_tx_mbuf(dev);
1071 
1072 	return rc;
1073 }
1074 
1075 /**
1076  * DPDK callback to release the receive queue.
1077  *
1078  * @param dev
1079  *   Pointer to Ethernet device structure.
1080  * @param qid
1081  *   Receive queue index.
1082  */
1083 static void hinic_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1084 {
1085 	struct hinic_rxq *rxq = dev->data->rx_queues[qid];
1086 	struct hinic_nic_dev *nic_dev;
1087 
1088 	if (!rxq) {
1089 		PMD_DRV_LOG(WARNING, "Rxq is null when release");
1090 		return;
1091 	}
1092 	nic_dev = rxq->nic_dev;
1093 
1094 	/* free rxq_pkt mbuf */
1095 	hinic_free_all_rx_mbufs(rxq);
1096 
1097 	/* free rxq_cqe, rxq_info */
1098 	hinic_free_rx_resources(rxq);
1099 
1100 	/* free root rq wq */
1101 	hinic_destroy_rq(nic_dev->hwdev, rxq->q_id);
1102 
1103 	nic_dev->rxqs[rxq->q_id] = NULL;
1104 
1105 	/* free rxq */
1106 	rte_free(rxq);
1107 }
1108 
1109 /**
1110  * DPDK callback to release the transmit queue.
1111  *
1112  * @param dev
1113  *   Pointer to Ethernet device structure.
1114  * @param qid
1115  *   Transmit queue index.
1116  */
1117 static void hinic_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1118 {
1119 	struct hinic_txq *txq = dev->data->tx_queues[qid];
1120 	struct hinic_nic_dev *nic_dev;
1121 
1122 	if (!txq) {
1123 		PMD_DRV_LOG(WARNING, "Txq is null when release");
1124 		return;
1125 	}
1126 	nic_dev = txq->nic_dev;
1127 
1128 	/* free txq_pkt mbuf */
1129 	hinic_free_all_tx_mbufs(txq);
1130 
1131 	/* free txq_info */
1132 	hinic_free_tx_resources(txq);
1133 
1134 	/* free root sq wq */
1135 	hinic_destroy_sq(nic_dev->hwdev, txq->q_id);
1136 	nic_dev->txqs[txq->q_id] = NULL;
1137 
1138 	/* free txq */
1139 	rte_free(txq);
1140 }
1141 
1142 static void hinic_free_all_rq(struct hinic_nic_dev *nic_dev)
1143 {
1144 	u16 q_id;
1145 
1146 	for (q_id = 0; q_id < nic_dev->num_rq; q_id++)
1147 		hinic_destroy_rq(nic_dev->hwdev, q_id);
1148 }
1149 
1150 static void hinic_free_all_sq(struct hinic_nic_dev *nic_dev)
1151 {
1152 	u16 q_id;
1153 
1154 	for (q_id = 0; q_id < nic_dev->num_sq; q_id++)
1155 		hinic_destroy_sq(nic_dev->hwdev, q_id);
1156 }
1157 
1158 /**
1159  * DPDK callback to stop the device.
1160  *
1161  * @param dev
1162  *   Pointer to Ethernet device structure.
1163  */
1164 static int hinic_dev_stop(struct rte_eth_dev *dev)
1165 {
1166 	int rc;
1167 	char *name;
1168 	uint16_t port_id;
1169 	struct hinic_nic_dev *nic_dev;
1170 	struct rte_eth_link link;
1171 
1172 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1173 	name = dev->data->name;
1174 	port_id = dev->data->port_id;
1175 
1176 	dev->data->dev_started = 0;
1177 
1178 	if (!rte_bit_relaxed_test_and_clear32(HINIC_DEV_START,
1179 					      &nic_dev->dev_status)) {
1180 		PMD_DRV_LOG(INFO, "Device %s already stopped", name);
1181 		return 0;
1182 	}
1183 
1184 	/* just stop phy port and vport */
1185 	rc = hinic_set_port_enable(nic_dev->hwdev, false);
1186 	if (rc)
1187 		PMD_DRV_LOG(WARNING, "Disable phy port failed, error: %d, dev_name: %s, port_id: %d",
1188 			  rc, name, port_id);
1189 
1190 	rc = hinic_set_vport_enable(nic_dev->hwdev, false);
1191 	if (rc)
1192 		PMD_DRV_LOG(WARNING, "Disable vport failed, error: %d, dev_name: %s, port_id: %d",
1193 			  rc, name, port_id);
1194 
1195 	/* Clear recorded link status */
1196 	memset(&link, 0, sizeof(link));
1197 	(void)rte_eth_linkstatus_set(dev, &link);
1198 
1199 	/* flush pending io request */
1200 	rc = hinic_rx_tx_flush(nic_dev->hwdev);
1201 	if (rc)
1202 		PMD_DRV_LOG(WARNING, "Flush pending io failed, error: %d, dev_name: %s, port_id: %d",
1203 			    rc, name, port_id);
1204 
1205 	/* clean rss table and rx_mode */
1206 	hinic_remove_rxtx_configure(dev);
1207 
1208 	/* clean root context */
1209 	hinic_free_qp_ctxts(nic_dev->hwdev);
1210 
1211 	hinic_destroy_fdir_filter(dev);
1212 
1213 	/* free mbuf */
1214 	hinic_free_all_rx_mbuf(dev);
1215 	hinic_free_all_tx_mbuf(dev);
1216 
1217 	return 0;
1218 }
1219 
1220 static void hinic_disable_interrupt(struct rte_eth_dev *dev)
1221 {
1222 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1223 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1224 	int ret, retries = 0;
1225 
1226 	rte_bit_relaxed_clear32(HINIC_DEV_INTR_EN, &nic_dev->dev_status);
1227 
1228 	/* disable msix interrupt in hardware */
1229 	hinic_set_msix_state(nic_dev->hwdev, 0, HINIC_MSIX_DISABLE);
1230 
1231 	/* disable rte interrupt */
1232 	ret = rte_intr_disable(&pci_dev->intr_handle);
1233 	if (ret)
1234 		PMD_DRV_LOG(ERR, "Disable intr failed: %d", ret);
1235 
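	/*
	 * -EAGAIN means the callback is still being serviced, so wait and
	 * retry; the loop gives up after 10 retries x 100 ms (about 1 s).
	 */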
1236 	do {
1237 		ret =
1238 		rte_intr_callback_unregister(&pci_dev->intr_handle,
1239 					     hinic_dev_interrupt_handler, dev);
1240 		if (ret >= 0) {
1241 			break;
1242 		} else if (ret == -EAGAIN) {
1243 			rte_delay_ms(100);
1244 			retries++;
1245 		} else {
1246 			PMD_DRV_LOG(ERR, "intr callback unregister failed: %d",
1247 				    ret);
1248 			break;
1249 		}
1250 	} while (retries < HINIC_INTR_CB_UNREG_MAX_RETRIES);
1251 
1252 	if (retries == HINIC_INTR_CB_UNREG_MAX_RETRIES)
1253 		PMD_DRV_LOG(ERR, "Unregister intr callback failed after %d retries",
1254 			    retries);
1255 
1256 	rte_bit_relaxed_clear32(HINIC_DEV_INIT, &nic_dev->dev_status);
1257 }
1258 
1259 static int hinic_set_dev_promiscuous(struct hinic_nic_dev *nic_dev, bool enable)
1260 {
1261 	u32 rx_mode_ctrl;
1262 	int err;
1263 
1264 	err = hinic_mutex_lock(&nic_dev->rx_mode_mutex);
1265 	if (err)
1266 		return err;
1267 
1268 	rx_mode_ctrl = nic_dev->rx_mode_status;
1269 
1270 	if (enable)
1271 		rx_mode_ctrl |= HINIC_RX_MODE_PROMISC;
1272 	else
1273 		rx_mode_ctrl &= (~HINIC_RX_MODE_PROMISC);
1274 
1275 	err = hinic_config_rx_mode(nic_dev, rx_mode_ctrl);
1276 
1277 	(void)hinic_mutex_unlock(&nic_dev->rx_mode_mutex);
1278 
1279 	return err;
1280 }
1281 
1282 /**
1283  * DPDK callback to get device statistics.
1284  *
1285  * @param dev
1286  *   Pointer to Ethernet device structure.
1287  * @param stats
1288  *   Stats structure output buffer.
1289  *
1290  * @return
1291  *   0 on success and stats is filled,
1292  *   negative error value otherwise.
1293  */
1294 static int
1295 hinic_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1296 {
1297 	int i, err, q_num;
1298 	u64 rx_discards_pmd = 0;
1299 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1300 	struct hinic_vport_stats vport_stats;
1301 	struct hinic_rxq	*rxq = NULL;
1302 	struct hinic_rxq_stats rxq_stats;
1303 	struct hinic_txq	*txq = NULL;
1304 	struct hinic_txq_stats txq_stats;
1305 
1306 	err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats);
1307 	if (err) {
1308 		PMD_DRV_LOG(ERR, "Get vport stats from fw failed, nic_dev: %s",
1309 			nic_dev->proc_dev_name);
1310 		return err;
1311 	}
1312 
1313 	dev->data->rx_mbuf_alloc_failed = 0;
1314 
1315 	/* rx queue stats */
1316 	q_num = (nic_dev->num_rq < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
1317 			nic_dev->num_rq : RTE_ETHDEV_QUEUE_STAT_CNTRS;
1318 	for (i = 0; i < q_num; i++) {
1319 		rxq = nic_dev->rxqs[i];
1320 		hinic_rxq_get_stats(rxq, &rxq_stats);
1321 		stats->q_ipackets[i] = rxq_stats.packets;
1322 		stats->q_ibytes[i] = rxq_stats.bytes;
1323 		stats->q_errors[i] = rxq_stats.rx_discards;
1324 
1325 		stats->ierrors += rxq_stats.errors;
1326 		rx_discards_pmd += rxq_stats.rx_discards;
1327 		dev->data->rx_mbuf_alloc_failed += rxq_stats.rx_nombuf;
1328 	}
1329 
1330 	/* tx queue stats */
1331 	q_num = (nic_dev->num_sq < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
1332 		nic_dev->num_sq : RTE_ETHDEV_QUEUE_STAT_CNTRS;
1333 	for (i = 0; i < q_num; i++) {
1334 		txq = nic_dev->txqs[i];
1335 		hinic_txq_get_stats(txq, &txq_stats);
1336 		stats->q_opackets[i] = txq_stats.packets;
1337 		stats->q_obytes[i] = txq_stats.bytes;
1338 		stats->oerrors += (txq_stats.tx_busy + txq_stats.off_errs);
1339 	}
1340 
1341 	/* vport stats */
1342 	stats->oerrors += vport_stats.tx_discard_vport;
1343 
1344 	stats->imissed = vport_stats.rx_discard_vport + rx_discards_pmd;
1345 
1346 	stats->ipackets = (vport_stats.rx_unicast_pkts_vport +
1347 			vport_stats.rx_multicast_pkts_vport +
1348 			vport_stats.rx_broadcast_pkts_vport -
1349 			rx_discards_pmd);
1350 
1351 	stats->opackets = (vport_stats.tx_unicast_pkts_vport +
1352 			vport_stats.tx_multicast_pkts_vport +
1353 			vport_stats.tx_broadcast_pkts_vport);
1354 
1355 	stats->ibytes = (vport_stats.rx_unicast_bytes_vport +
1356 			vport_stats.rx_multicast_bytes_vport +
1357 			vport_stats.rx_broadcast_bytes_vport);
1358 
1359 	stats->obytes = (vport_stats.tx_unicast_bytes_vport +
1360 			vport_stats.tx_multicast_bytes_vport +
1361 			vport_stats.tx_broadcast_bytes_vport);
1362 	return 0;
1363 }
1364 
1365 /**
1366  * DPDK callback to clear device statistics.
1367  *
1368  * @param dev
1369  *   Pointer to Ethernet device structure.
1370  */
1371 static int hinic_dev_stats_reset(struct rte_eth_dev *dev)
1372 {
1373 	int qid;
1374 	struct hinic_rxq	*rxq = NULL;
1375 	struct hinic_txq	*txq = NULL;
1376 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1377 	int ret;
1378 
1379 	ret = hinic_clear_vport_stats(nic_dev->hwdev);
1380 	if (ret != 0)
1381 		return ret;
1382 
1383 	for (qid = 0; qid < nic_dev->num_rq; qid++) {
1384 		rxq = nic_dev->rxqs[qid];
1385 		hinic_rxq_stats_reset(rxq);
1386 	}
1387 
1388 	for (qid = 0; qid < nic_dev->num_sq; qid++) {
1389 		txq = nic_dev->txqs[qid];
1390 		hinic_txq_stats_reset(txq);
1391 	}
1392 
1393 	return 0;
1394 }
1395 
1396 /**
1397  * DPDK callback to clear device extended statistics.
1398  *
1399  * @param dev
1400  *   Pointer to Ethernet device structure.
1401  */
1402 static int hinic_dev_xstats_reset(struct rte_eth_dev *dev)
1403 {
1404 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1405 	int ret;
1406 
1407 	ret = hinic_dev_stats_reset(dev);
1408 	if (ret != 0)
1409 		return ret;
1410 
1411 	if (hinic_func_type(nic_dev->hwdev) != TYPE_VF) {
1412 		ret = hinic_clear_phy_port_stats(nic_dev->hwdev);
1413 		if (ret != 0)
1414 			return ret;
1415 	}
1416 
1417 	return 0;
1418 }
1419 
1420 static void hinic_gen_random_mac_addr(struct rte_ether_addr *mac_addr)
1421 {
1422 	uint64_t random_value;
1423 
1424 	/* Set Organizationally Unique Identifier (OUI) prefix */
1425 	mac_addr->addr_bytes[0] = 0x00;
1426 	mac_addr->addr_bytes[1] = 0x09;
1427 	mac_addr->addr_bytes[2] = 0xC0;
1428 	/* Force indication of locally assigned MAC address. */
1429 	mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
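	/*
	 * Illustrative result: 0x00 | RTE_ETHER_LOCAL_ADMIN_ADDR (0x02) is
	 * 0x02, so generated addresses take the form 02:09:c0:xx:xx:xx.
	 */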
1430 	/* Generate the last 3 bytes of the MAC address with a random number. */
1431 	random_value = rte_rand();
1432 	memcpy(&mac_addr->addr_bytes[3], &random_value, 3);
1433 }
1434 
1435 /**
1436  * Init mac_vlan table in NIC.
1437  *
1438  * @param eth_dev
1439  *   Pointer to Ethernet device structure.
1440  *
1441  * @return
1442  *   0 on success,
1443  *   negative error value otherwise.
1444  */
1445 static int hinic_init_mac_addr(struct rte_eth_dev *eth_dev)
1446 {
1447 	struct hinic_nic_dev *nic_dev =
1448 				HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
1449 	uint8_t addr_bytes[RTE_ETHER_ADDR_LEN];
1450 	u16 func_id = 0;
1451 	int rc = 0;
1452 
1453 	rc = hinic_get_default_mac(nic_dev->hwdev, addr_bytes);
1454 	if (rc)
1455 		return rc;
1456 
1457 	rte_ether_addr_copy((struct rte_ether_addr *)addr_bytes,
1458 		&eth_dev->data->mac_addrs[0]);
1459 	if (rte_is_zero_ether_addr(&eth_dev->data->mac_addrs[0]))
1460 		hinic_gen_random_mac_addr(&eth_dev->data->mac_addrs[0]);
1461 
1462 	func_id = hinic_global_func_id(nic_dev->hwdev);
1463 	rc = hinic_set_mac(nic_dev->hwdev,
1464 			eth_dev->data->mac_addrs[0].addr_bytes,
1465 			0, func_id);
1466 	if (rc && rc != HINIC_PF_SET_VF_ALREADY)
1467 		return rc;
1468 
1469 	rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
1470 			&nic_dev->default_addr);
1471 
1472 	return 0;
1473 }
1474 
1475 static void hinic_delete_mc_addr_list(struct hinic_nic_dev *nic_dev)
1476 {
1477 	u16 func_id;
1478 	u32 i;
1479 
1480 	func_id = hinic_global_func_id(nic_dev->hwdev);
1481 
1482 	for (i = 0; i < HINIC_MAX_MC_MAC_ADDRS; i++) {
1483 		if (rte_is_zero_ether_addr(&nic_dev->mc_list[i]))
1484 			break;
1485 
1486 		hinic_del_mac(nic_dev->hwdev, nic_dev->mc_list[i].addr_bytes,
1487 			      0, func_id);
1488 		memset(&nic_dev->mc_list[i], 0, sizeof(struct rte_ether_addr));
1489 	}
1490 }
1491 
1492 /**
1493  * Deinit mac_vlan table in NIC.
1494  *
1495  * @param eth_dev
1496  *   Pointer to Ethernet device structure.
1501  */
1502 static void hinic_deinit_mac_addr(struct rte_eth_dev *eth_dev)
1503 {
1504 	struct hinic_nic_dev *nic_dev =
1505 				HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
1506 	u16 func_id = 0;
1507 	int rc;
1508 	int i;
1509 
1510 	func_id = hinic_global_func_id(nic_dev->hwdev);
1511 
1512 	for (i = 0; i < HINIC_MAX_UC_MAC_ADDRS; i++) {
1513 		if (rte_is_zero_ether_addr(&eth_dev->data->mac_addrs[i]))
1514 			continue;
1515 
1516 		rc = hinic_del_mac(nic_dev->hwdev,
1517 				   eth_dev->data->mac_addrs[i].addr_bytes,
1518 				   0, func_id);
1519 		if (rc && rc != HINIC_PF_SET_VF_ALREADY)
1520 			PMD_DRV_LOG(ERR, "Delete mac table failed, dev_name: %s",
1521 				    eth_dev->data->name);
1522 
1523 		memset(&eth_dev->data->mac_addrs[i], 0,
1524 		       sizeof(struct rte_ether_addr));
1525 	}
1526 
1527 	/* delete multicast mac addrs */
1528 	hinic_delete_mc_addr_list(nic_dev);
1529 
1530 	rte_free(nic_dev->mc_list);
1531 
1532 }
1533 
1534 static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
1535 {
1536 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1537 	uint32_t frame_size;
1538 	int ret = 0;
1539 
1540 	PMD_DRV_LOG(INFO, "Set port mtu, port_id: %d, mtu: %d, max_pkt_len: %d",
1541 			dev->data->port_id, mtu, HINIC_MTU_TO_PKTLEN(mtu));
1542 
1543 	if (mtu < HINIC_MIN_MTU_SIZE || mtu > HINIC_MAX_MTU_SIZE) {
1544 		PMD_DRV_LOG(ERR, "Invalid mtu: %d, must between %d and %d",
1545 				mtu, HINIC_MIN_MTU_SIZE, HINIC_MAX_MTU_SIZE);
1546 		return -EINVAL;
1547 	}
1548 
1549 	ret = hinic_set_port_mtu(nic_dev->hwdev, mtu);
1550 	if (ret) {
1551 		PMD_DRV_LOG(ERR, "Set port mtu failed, ret: %d", ret);
1552 		return ret;
1553 	}
1554 
1555 	/* update max frame size */
1556 	frame_size = HINIC_MTU_TO_PKTLEN(mtu);
1557 	if (frame_size > HINIC_ETH_MAX_LEN)
1558 		dev->data->dev_conf.rxmode.offloads |=
1559 			DEV_RX_OFFLOAD_JUMBO_FRAME;
1560 	else
1561 		dev->data->dev_conf.rxmode.offloads &=
1562 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
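	/*
	 * Example (assuming HINIC_MTU_TO_PKTLEN adds the 18-byte Ethernet
	 * overhead and HINIC_ETH_MAX_LEN is 1518): an MTU of 1500 maps to a
	 * 1518-byte frame and leaves jumbo disabled, while 9000 maps to
	 * 9018 bytes and enables it.
	 */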
1563 
1564 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1565 	nic_dev->mtu_size = mtu;
1566 
1567 	return ret;
1568 }
1569 
1570 static void hinic_store_vlan_filter(struct hinic_nic_dev *nic_dev,
1571 					u16 vlan_id, bool on)
1572 {
1573 	u32 vid_idx, vid_bit;
1574 
1575 	vid_idx = HINIC_VFTA_IDX(vlan_id);
1576 	vid_bit = HINIC_VFTA_BIT(vlan_id);
1577 
1578 	if (on)
1579 		nic_dev->vfta[vid_idx] |= vid_bit;
1580 	else
1581 		nic_dev->vfta[vid_idx] &= ~vid_bit;
1582 }
1583 
1584 static bool hinic_find_vlan_filter(struct hinic_nic_dev *nic_dev,
1585 				uint16_t vlan_id)
1586 {
1587 	u32 vid_idx, vid_bit;
1588 
1589 	vid_idx = HINIC_VFTA_IDX(vlan_id);
1590 	vid_bit = HINIC_VFTA_BIT(vlan_id);
1591 
1592 	return (nic_dev->vfta[vid_idx] & vid_bit) ? TRUE : FALSE;
1593 }
1594 
1595 /**
1596  * DPDK callback to set vlan filter.
1597  *
1598  * @param dev
1599  *   Pointer to Ethernet device structure.
1600  * @param vlan_id
1601  *   VLAN ID whose packets are to be filtered.
1602  * @param enable
1603  *   Non-zero to add the VLAN ID to the filter, zero to remove it.
1604  */
1605 static int hinic_vlan_filter_set(struct rte_eth_dev *dev,
1606 				uint16_t vlan_id, int enable)
1607 {
1608 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1609 	int err = 0;
1610 	u16 func_id;
1611 
1612 	if (vlan_id > RTE_ETHER_MAX_VLAN_ID)
1613 		return -EINVAL;
1614 
1615 	if (vlan_id == 0)
1616 		return 0;
1617 
1618 	func_id = hinic_global_func_id(nic_dev->hwdev);
1619 
1620 	if (enable) {
1621 		/* If vlanid is already set, just return */
1622 		if (hinic_find_vlan_filter(nic_dev, vlan_id)) {
1623 			PMD_DRV_LOG(INFO, "Vlan %u has been added, device: %s",
1624 				  vlan_id, nic_dev->proc_dev_name);
1625 			return 0;
1626 		}
1627 
1628 		err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id,
1629 					    func_id, TRUE);
1630 	} else {
1631 		/* If vlanid can't be found, just return */
1632 		if (!hinic_find_vlan_filter(nic_dev, vlan_id)) {
1633 			PMD_DRV_LOG(INFO, "Vlan %u is not in the vlan filter list, device: %s",
1634 				  vlan_id, nic_dev->proc_dev_name);
1635 			return 0;
1636 		}
1637 
1638 		err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id,
1639 					    func_id, FALSE);
1640 	}
1641 
1642 	if (err) {
1643 		PMD_DRV_LOG(ERR, "%s vlan failed, func_id: %d, vlan_id: %d, err: %d",
1644 		      enable ? "Add" : "Remove", func_id, vlan_id, err);
1645 		return err;
1646 	}
1647 
1648 	hinic_store_vlan_filter(nic_dev, vlan_id, enable);
1649 
1650 	PMD_DRV_LOG(INFO, "%s vlan %u succeed, device: %s",
1651 		  enable ? "Add" : "Remove", vlan_id, nic_dev->proc_dev_name);
1652 	return 0;
1653 }
1654 
1655 /**
1656  * DPDK callback to enable or disable vlan offload.
1657  *
1658  * @param dev
1659  *   Pointer to Ethernet device structure.
1660  * @param mask
1661  *   Definitions used for VLAN setting
1662  */
1663 static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1664 {
1665 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1666 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1667 	bool on;
1668 	int err;
1669 
1670 	/* Enable or disable VLAN filter */
1671 	if (mask & ETH_VLAN_FILTER_MASK) {
1672 		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ?
1673 			TRUE : FALSE;
1674 		err = hinic_config_vlan_filter(nic_dev->hwdev, on);
1675 		if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
1676 			PMD_DRV_LOG(WARNING,
1677 				"Current matching version does not support vlan filter configuration, device: %s, port_id: %d",
1678 				  nic_dev->proc_dev_name, dev->data->port_id);
1679 		} else if (err) {
1680 			PMD_DRV_LOG(ERR, "Failed to %s vlan filter, device: %s, port_id: %d, err: %d",
1681 				  on ? "enable" : "disable",
1682 				  nic_dev->proc_dev_name,
1683 				  dev->data->port_id, err);
1684 			return err;
1685 		}
1686 
1687 		PMD_DRV_LOG(INFO, "%s vlan filter succeed, device: %s, port_id: %d",
1688 			  on ? "Enable" : "Disable",
1689 			  nic_dev->proc_dev_name, dev->data->port_id);
1690 	}
1691 
1692 	/* Enable or disable VLAN stripping */
1693 	if (mask & ETH_VLAN_STRIP_MASK) {
1694 		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) ?
1695 			TRUE : FALSE;
1696 		err = hinic_set_rx_vlan_offload(nic_dev->hwdev, on);
1697 		if (err) {
1698 			PMD_DRV_LOG(ERR, "Failed to %s vlan strip, device: %s, port_id: %d, err: %d",
1699 				  on ? "enable" : "disable",
1700 				  nic_dev->proc_dev_name,
1701 				  dev->data->port_id, err);
1702 			return err;
1703 		}
1704 
1705 		PMD_DRV_LOG(INFO, "%s vlan strip succeed, device: %s, port_id: %d",
1706 			  on ? "Enable" : "Disable",
1707 			  nic_dev->proc_dev_name, dev->data->port_id);
1708 	}
1709 
1710 	return 0;
1711 }
1712 
1713 static void hinic_remove_all_vlanid(struct rte_eth_dev *eth_dev)
1714 {
1715 	struct hinic_nic_dev *nic_dev =
1716 		HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
1717 	u16 func_id;
1718 	int i;
1719 
1720 	func_id = hinic_global_func_id(nic_dev->hwdev);
1721 	for (i = 0; i <= RTE_ETHER_MAX_VLAN_ID; i++) {
1722 		/* If it can't be found, continue */
1723 		if (!hinic_find_vlan_filter(nic_dev, i))
1724 			continue;
1725 
1726 		(void)hinic_add_remove_vlan(nic_dev->hwdev, i, func_id, FALSE);
1727 		hinic_store_vlan_filter(nic_dev, i, false);
1728 	}
1729 }
1730 
1731 static int hinic_set_dev_allmulticast(struct hinic_nic_dev *nic_dev,
1732 				bool enable)
1733 {
1734 	u32 rx_mode_ctrl;
1735 	int err;
1736 
1737 	err = hinic_mutex_lock(&nic_dev->rx_mode_mutex);
1738 	if (err)
1739 		return err;
1740 
1741 	rx_mode_ctrl = nic_dev->rx_mode_status;
1742 
1743 	if (enable)
1744 		rx_mode_ctrl |= HINIC_RX_MODE_MC_ALL;
1745 	else
1746 		rx_mode_ctrl &= (~HINIC_RX_MODE_MC_ALL);
1747 
1748 	err = hinic_config_rx_mode(nic_dev, rx_mode_ctrl);
1749 
1750 	(void)hinic_mutex_unlock(&nic_dev->rx_mode_mutex);
1751 
1752 	return err;
1753 }
1754 
1755 /**
1756  * DPDK callback to enable allmulticast mode.
1757  *
1758  * @param dev
1759  *   Pointer to Ethernet device structure.
1760  *
1761  * @return
1762  *   0 on success,
1763  *   negative error value otherwise.
1764  */
1765 static int hinic_dev_allmulticast_enable(struct rte_eth_dev *dev)
1766 {
1767 	int ret = HINIC_OK;
1768 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1769 
1770 	ret = hinic_set_dev_allmulticast(nic_dev, true);
1771 	if (ret) {
1772 		PMD_DRV_LOG(ERR, "Enable allmulticast failed, error: %d", ret);
1773 		return ret;
1774 	}
1775 
1776 	PMD_DRV_LOG(INFO, "Enable allmulticast succeeded, nic_dev: %s, port_id: %d",
1777 		nic_dev->proc_dev_name, dev->data->port_id);
1778 	return 0;
1779 }
1780 
1781 /**
1782  * DPDK callback to disable allmulticast mode.
1783  *
1784  * @param dev
1785  *   Pointer to Ethernet device structure.
1786  *
1787  * @return
1788  *   0 on success,
1789  *   negative error value otherwise.
1790  */
1791 static int hinic_dev_allmulticast_disable(struct rte_eth_dev *dev)
1792 {
1793 	int ret = HINIC_OK;
1794 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1795 
1796 	ret = hinic_set_dev_allmulticast(nic_dev, false);
1797 	if (ret) {
1798 		PMD_DRV_LOG(ERR, "Disable allmulticast failed, error: %d", ret);
1799 		return ret;
1800 	}
1801 
1802 	PMD_DRV_LOG(INFO, "Disable allmulticast succeeded, nic_dev: %s, port_id: %d",
1803 		nic_dev->proc_dev_name, dev->data->port_id);
1804 	return 0;
1805 }
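
/*
 * Usage sketch (illustrative only, not part of this driver): the two
 * callbacks above back the generic ethdev helpers, e.g.
 *
 *	ret = rte_eth_allmulticast_enable(port_id);
 *	...
 *	ret = rte_eth_allmulticast_disable(port_id);
 *	state = rte_eth_allmulticast_get(port_id);  // 1 = on, 0 = off
 *
 * where port_id is assumed to identify an initialized hinic port.
 */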
1806 
1807 /**
1808  * DPDK callback to enable promiscuous mode.
1809  *
1810  * @param dev
1811  *   Pointer to Ethernet device structure.
1812  *
1813  * @return
1814  *   0 on success,
1815  *   negative error value otherwise.
1816  */
1817 static int hinic_dev_promiscuous_enable(struct rte_eth_dev *dev)
1818 {
1819 	int rc = HINIC_OK;
1820 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1821 
1822 	PMD_DRV_LOG(INFO, "Enable promiscuous, nic_dev: %s, port_id: %d, promisc: %d",
1823 		    nic_dev->proc_dev_name, dev->data->port_id,
1824 		    dev->data->promiscuous);
1825 
1826 	rc = hinic_set_dev_promiscuous(nic_dev, true);
1827 	if (rc)
1828 		PMD_DRV_LOG(ERR, "Enable promiscuous failed");
1829 
1830 	return rc;
1831 }
1832 
1833 /**
1834  * DPDK callback to disable promiscuous mode.
1835  *
1836  * @param dev
1837  *   Pointer to Ethernet device structure.
1838  *
1839  * @return
1840  *   0 on success,
1841  *   negative error value otherwise.
1842  */
1843 static int hinic_dev_promiscuous_disable(struct rte_eth_dev *dev)
1844 {
1845 	int rc = HINIC_OK;
1846 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1847 
1848 	PMD_DRV_LOG(INFO, "Disable promiscuous, nic_dev: %s, port_id: %d, promisc: %d",
1849 		    nic_dev->proc_dev_name, dev->data->port_id,
1850 		    dev->data->promiscuous);
1851 
1852 	rc = hinic_set_dev_promiscuous(nic_dev, false);
1853 	if (rc)
1854 		PMD_DRV_LOG(ERR, "Disable promiscuous failed");
1855 
1856 	return rc;
1857 }
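
/*
 * Usage sketch (illustrative only, not part of this driver):
 * promiscuous mode is toggled from the application side with
 *
 *	ret = rte_eth_promiscuous_enable(port_id);
 *	ret = rte_eth_promiscuous_disable(port_id);
 *
 * dev->data->promiscuous (logged above) is updated by the ethdev
 * layer after the callback succeeds, which is why it is only read
 * here.
 */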
1858 
1859 static int hinic_flow_ctrl_get(struct rte_eth_dev *dev,
1860 			struct rte_eth_fc_conf *fc_conf)
1861 {
1862 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1863 	struct nic_pause_config nic_pause;
1864 	int err;
1865 
1866 	memset(&nic_pause, 0, sizeof(nic_pause));
1867 
1868 	err = hinic_get_pause_info(nic_dev->hwdev, &nic_pause);
1869 	if (err)
1870 		return err;
1871 
1872 	if (nic_dev->pause_set || !nic_pause.auto_neg) {
1873 		nic_pause.rx_pause = nic_dev->nic_pause.rx_pause;
1874 		nic_pause.tx_pause = nic_dev->nic_pause.tx_pause;
1875 	}
1876 
1877 	fc_conf->autoneg = nic_pause.auto_neg;
1878 
1879 	if (nic_pause.tx_pause && nic_pause.rx_pause)
1880 		fc_conf->mode = RTE_FC_FULL;
1881 	else if (nic_pause.tx_pause)
1882 		fc_conf->mode = RTE_FC_TX_PAUSE;
1883 	else if (nic_pause.rx_pause)
1884 		fc_conf->mode = RTE_FC_RX_PAUSE;
1885 	else
1886 		fc_conf->mode = RTE_FC_NONE;
1887 
1888 	return 0;
1889 }
1890 
1891 static int hinic_flow_ctrl_set(struct rte_eth_dev *dev,
1892 			struct rte_eth_fc_conf *fc_conf)
1893 {
1894 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1895 	struct nic_pause_config nic_pause;
1896 	int err;
1897 
1898 	nic_pause.auto_neg = fc_conf->autoneg;
1899 
1900 	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
1901 		(fc_conf->mode & RTE_FC_TX_PAUSE))
1902 		nic_pause.tx_pause = true;
1903 	else
1904 		nic_pause.tx_pause = false;
1905 
1906 	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
1907 		(fc_conf->mode & RTE_FC_RX_PAUSE))
1908 		nic_pause.rx_pause = true;
1909 	else
1910 		nic_pause.rx_pause = false;
1911 
1912 	err = hinic_set_pause_config(nic_dev->hwdev, nic_pause);
1913 	if (err)
1914 		return err;
1915 
1916 	nic_dev->pause_set = true;
1917 	nic_dev->nic_pause.auto_neg = nic_pause.auto_neg;
1918 	nic_dev->nic_pause.rx_pause = nic_pause.rx_pause;
1919 	nic_dev->nic_pause.tx_pause = nic_pause.tx_pause;
1920 
1921 	PMD_DRV_LOG(INFO, "Set pause options, tx: %s, rx: %s, auto: %s",
1922 		nic_pause.tx_pause ? "on" : "off",
1923 		nic_pause.rx_pause ? "on" : "off",
1924 		nic_pause.auto_neg ? "on" : "off");
1925 
1926 	return 0;
1927 }
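
/*
 * Usage sketch (illustrative only, not part of this driver): pause
 * parameters are normally programmed through
 * rte_eth_dev_flow_ctrl_set(). A minimal call enabling pause in both
 * directions, with port_id assumed valid, could be:
 *
 *	struct rte_eth_fc_conf fc_conf;
 *
 *	memset(&fc_conf, 0, sizeof(fc_conf));
 *	fc_conf.mode = RTE_FC_FULL;
 *	fc_conf.autoneg = 0;
 *	ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 *
 * Note that hinic_flow_ctrl_set() only consumes the mode and autoneg
 * fields; the water-mark and pause_time fields are ignored.
 */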
1928 
1929 /**
1930  * DPDK callback to update the RSS hash key and RSS hash type.
1931  *
1932  * @param dev
1933  *   Pointer to Ethernet device structure.
1934  * @param rss_conf
1935  *   RSS configuration data.
1936  *
1937  * @return
1938  *   0 on success, negative error value otherwise.
1939  */
1940 static int hinic_rss_hash_update(struct rte_eth_dev *dev,
1941 			  struct rte_eth_rss_conf *rss_conf)
1942 {
1943 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1944 	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
1945 	u8 hashkey[HINIC_RSS_KEY_SIZE] = {0};
1946 	u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
1947 	u64 rss_hf = rss_conf->rss_hf;
1948 	struct nic_rss_type rss_type = {0};
1949 	int err = 0;
1950 
1951 	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
1952 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
1953 		return HINIC_OK;
1954 	}
1955 
1956 	if (rss_conf->rss_key_len > HINIC_RSS_KEY_SIZE) {
1957 		PMD_DRV_LOG(ERR, "Invalid rss key, rss_key_len: %d",
1958 			    rss_conf->rss_key_len);
1959 		return HINIC_ERROR;
1960 	}
1961 
1962 	if (rss_conf->rss_key) {
1963 		memcpy(hashkey, rss_conf->rss_key, rss_conf->rss_key_len);
1964 		err = hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx,
1965 						 hashkey);
1966 		if (err) {
1967 			PMD_DRV_LOG(ERR, "Set rss template table failed");
1968 			goto disable_rss;
1969 		}
1970 	}
1971 
1972 	rss_type.ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
1973 	rss_type.tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
1974 	rss_type.ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
1975 	rss_type.ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
1976 	rss_type.tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
1977 	rss_type.tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
1978 	rss_type.udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
1979 	rss_type.udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
1980 
1981 	err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type);
1982 	if (err) {
1983 		PMD_DRV_LOG(ERR, "Set rss type table failed");
1984 		goto disable_rss;
1985 	}
1986 
1987 	return 0;
1988 
1989 disable_rss:
1990 	memset(prio_tc, 0, sizeof(prio_tc));
1991 	(void)hinic_rss_cfg(nic_dev->hwdev, 0, tmpl_idx, 0, prio_tc);
1992 	return err;
1993 }
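
/*
 * Usage sketch (illustrative only, not part of this driver): assuming
 * the port was configured with ETH_MQ_RX_RSS, the active hash types
 * can be changed at runtime while keeping the current key:
 *
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = NULL,	// NULL keeps the current hash key
 *		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP |
 *			  ETH_RSS_NONFRAG_IPV4_UDP,
 *	};
 *
 *	ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 *
 * A new key may also be supplied via rss_key/rss_key_len as long as
 * rss_key_len does not exceed HINIC_RSS_KEY_SIZE.
 */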
1994 
1995 /**
1996  * DPDK callback to get the RSS hash configuration.
1997  *
1998  * @param dev
1999  *   Pointer to Ethernet device structure.
2000  * @param rss_conf
2001  *   RSS configuration data.
2002  *
2003  * @return
2004  *   0 on success, negative error value otherwise.
2005  */
2006 static int hinic_rss_conf_get(struct rte_eth_dev *dev,
2007 		       struct rte_eth_rss_conf *rss_conf)
2008 {
2009 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2010 	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
2011 	u8 hashkey[HINIC_RSS_KEY_SIZE] = {0};
2012 	struct nic_rss_type rss_type = {0};
2013 	int err;
2014 
2015 	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
2016 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
2017 		return HINIC_ERROR;
2018 	}
2019 
2020 	err = hinic_rss_get_template_tbl(nic_dev->hwdev, tmpl_idx, hashkey);
2021 	if (err)
2022 		return err;
2023 
2024 	if (rss_conf->rss_key &&
2025 	    rss_conf->rss_key_len >= HINIC_RSS_KEY_SIZE) {
2026 		memcpy(rss_conf->rss_key, hashkey, sizeof(hashkey));
2027 		rss_conf->rss_key_len = sizeof(hashkey);
2028 	}
2029 
2030 	err = hinic_get_rss_type(nic_dev->hwdev, tmpl_idx, &rss_type);
2031 	if (err)
2032 		return err;
2033 
2034 	rss_conf->rss_hf = 0;
2035 	rss_conf->rss_hf |=  rss_type.ipv4 ?
2036 		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4) : 0;
2037 	rss_conf->rss_hf |=  rss_type.tcp_ipv4 ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
2038 	rss_conf->rss_hf |=  rss_type.ipv6 ?
2039 		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6) : 0;
2040 	rss_conf->rss_hf |=  rss_type.ipv6_ext ? ETH_RSS_IPV6_EX : 0;
2041 	rss_conf->rss_hf |=  rss_type.tcp_ipv6 ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
2042 	rss_conf->rss_hf |=  rss_type.tcp_ipv6_ext ? ETH_RSS_IPV6_TCP_EX : 0;
2043 	rss_conf->rss_hf |=  rss_type.udp_ipv4 ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
2044 	rss_conf->rss_hf |=  rss_type.udp_ipv6 ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
2045 
2046 	return HINIC_OK;
2047 }
2048 
2049 /**
2050  * DPDK callback to update the RSS redirection table.
2051  *
2052  * @param dev
2053  *   Pointer to Ethernet device structure.
2054  * @param reta_conf
2055  *   Pointer to RSS reta configuration data.
2056  * @param reta_size
2057  *   Size of the RETA table.
2058  *
2059  * @return
2060  *   0 on success, negative error value otherwise.
2061  */
2062 static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev,
2063 			      struct rte_eth_rss_reta_entry64 *reta_conf,
2064 			      uint16_t reta_size)
2065 {
2066 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2067 	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
2068 	u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
2069 	u32 indirtbl[NIC_RSS_INDIR_SIZE] = {0};
2070 	int err = 0;
2071 	u16 i = 0;
2072 	u16 idx, shift;
2073 
2074 	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG))
2075 		return HINIC_OK;
2076 
2077 	if (reta_size != NIC_RSS_INDIR_SIZE) {
2078 		PMD_DRV_LOG(ERR, "Invalid reta size, reta_size: %d", reta_size);
2079 		return HINIC_ERROR;
2080 	}
2081 
2082 	err = hinic_rss_get_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl);
2083 	if (err)
2084 		return err;
2085 
2086 	/* update rss indir_tbl */
2087 	for (i = 0; i < reta_size; i++) {
2088 		idx = i / RTE_RETA_GROUP_SIZE;
2089 		shift = i % RTE_RETA_GROUP_SIZE;
2090 
2091 		if (reta_conf[idx].reta[shift] >= nic_dev->num_rq) {
2092 			PMD_DRV_LOG(ERR, "Invalid reta entry, indirtbl[%d]: %d "
2093 				"exceeds the maximum rxq num: %d", i,
2094 				reta_conf[idx].reta[shift], nic_dev->num_rq);
2095 			return -EINVAL;
2096 		}
2097 
2098 		if (reta_conf[idx].mask & (1ULL << shift))
2099 			indirtbl[i] = reta_conf[idx].reta[shift];
2100 	}
2101 
2102 	err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl);
2103 	if (err)
2104 		goto disable_rss;
2105 
2106 	nic_dev->rss_indir_flag = true;
2107 
2108 	return 0;
2109 
2110 disable_rss:
2111 	memset(prio_tc, 0, sizeof(prio_tc));
2112 	(void)hinic_rss_cfg(nic_dev->hwdev, 0, tmpl_idx, 0, prio_tc);
2113 
2114 	return HINIC_ERROR;
2115 }
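
/*
 * Usage sketch (illustrative only, not part of this driver): the
 * redirection table is updated in groups of RTE_RETA_GROUP_SIZE (64)
 * entries and must cover all NIC_RSS_INDIR_SIZE entries for this
 * device. Spreading reta_size entries round-robin over nb_rxq queues
 * (both assumed to be set by the caller) could look like:
 *
 *	struct rte_eth_rss_reta_entry64 reta[reta_size / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < reta_size; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_rxq;
 *	}
 *	ret = rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
 */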
2116 
2117 /**
2118  * DPDK callback to get the RSS indirection table.
2119  *
2120  * @param dev
2121  *   Pointer to Ethernet device structure.
2122  * @param reta_conf
2123  *   Pointer to RSS reta configuration data.
2124  * @param reta_size
2125  *   Size of the RETA table.
2126  *
2127  * @return
2128  *   0 on success, negative error value otherwise.
2129  */
2130 static int hinic_rss_indirtbl_query(struct rte_eth_dev *dev,
2131 			     struct rte_eth_rss_reta_entry64 *reta_conf,
2132 			     uint16_t reta_size)
2133 {
2134 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2135 	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
2136 	int err = 0;
2137 	u32 indirtbl[NIC_RSS_INDIR_SIZE] = {0};
2138 	u16 idx, shift;
2139 	u16 i = 0;
2140 
2141 	if (reta_size != NIC_RSS_INDIR_SIZE) {
2142 		PMD_DRV_LOG(ERR, "Invalid reta size, reta_size: %d", reta_size);
2143 		return HINIC_ERROR;
2144 	}
2145 
2146 	err = hinic_rss_get_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl);
2147 	if (err) {
2148 		PMD_DRV_LOG(ERR, "Get rss indirect table failed, error: %d",
2149 			    err);
2150 		return err;
2151 	}
2152 
2153 	for (i = 0; i < reta_size; i++) {
2154 		idx = i / RTE_RETA_GROUP_SIZE;
2155 		shift = i % RTE_RETA_GROUP_SIZE;
2156 		if (reta_conf[idx].mask & (1ULL << shift))
2157 			reta_conf[idx].reta[shift] = (uint16_t)indirtbl[i];
2158 	}
2159 
2160 	return HINIC_OK;
2161 }
2162 
2163 /**
2164  * DPDK callback to get extended device statistics.
2165  *
2166  * @param dev
2167  *   Pointer to Ethernet device.
2168  * @param xstats
2169  *   Pointer to rte extended stats table.
2170  * @param n
2171  *   The size of the stats table.
2172  *
2173  * @return
2174  *   Number of extended stats on success and stats is filled,
2175  *   negative error value otherwise.
2176  */
2177 static int hinic_dev_xstats_get(struct rte_eth_dev *dev,
2178 			 struct rte_eth_xstat *xstats,
2179 			 unsigned int n)
2180 {
2181 	u16 qid = 0;
2182 	u32 i;
2183 	int err, count;
2184 	struct hinic_nic_dev *nic_dev;
2185 	struct hinic_phy_port_stats port_stats;
2186 	struct hinic_vport_stats vport_stats;
2187 	struct hinic_rxq	*rxq = NULL;
2188 	struct hinic_rxq_stats rxq_stats;
2189 	struct hinic_txq	*txq = NULL;
2190 	struct hinic_txq_stats txq_stats;
2191 
2192 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2193 	count = hinic_xstats_calc_num(nic_dev);
2194 	if ((int)n < count)
2195 		return count;
2196 
2197 	count = 0;
2198 
2199 	/* Get stats from hinic_rxq_stats */
2200 	for (qid = 0; qid < nic_dev->num_rq; qid++) {
2201 		rxq = nic_dev->rxqs[qid];
2202 		hinic_rxq_get_stats(rxq, &rxq_stats);
2203 
2204 		for (i = 0; i < HINIC_RXQ_XSTATS_NUM; i++) {
2205 			xstats[count].value =
2206 				*(uint64_t *)(((char *)&rxq_stats) +
2207 				hinic_rxq_stats_strings[i].offset);
2208 			xstats[count].id = count;
2209 			count++;
2210 		}
2211 	}
2212 
2213 	/* Get stats from hinic_txq_stats */
2214 	for (qid = 0; qid < nic_dev->num_sq; qid++) {
2215 		txq = nic_dev->txqs[qid];
2216 		hinic_txq_get_stats(txq, &txq_stats);
2217 
2218 		for (i = 0; i < HINIC_TXQ_XSTATS_NUM; i++) {
2219 			xstats[count].value =
2220 				*(uint64_t *)(((char *)&txq_stats) +
2221 				hinic_txq_stats_strings[i].offset);
2222 			xstats[count].id = count;
2223 			count++;
2224 		}
2225 	}
2226 
2227 	/* Get stats from hinic_vport_stats */
2228 	err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats);
2229 	if (err)
2230 		return err;
2231 
2232 	for (i = 0; i < HINIC_VPORT_XSTATS_NUM; i++) {
2233 		xstats[count].value =
2234 			*(uint64_t *)(((char *)&vport_stats) +
2235 			hinic_vport_stats_strings[i].offset);
2236 		xstats[count].id = count;
2237 		count++;
2238 	}
2239 
2240 	if (HINIC_IS_VF(nic_dev->hwdev))
2241 		return count;
2242 
2243 	/* Get stats from hinic_phy_port_stats */
2244 	err = hinic_get_phy_port_stats(nic_dev->hwdev, &port_stats);
2245 	if (err)
2246 		return err;
2247 
2248 	for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) {
2249 		xstats[count].value = *(uint64_t *)(((char *)&port_stats) +
2250 				hinic_phyport_stats_strings[i].offset);
2251 		xstats[count].id = count;
2252 		count++;
2253 	}
2254 
2255 	return count;
2256 }
2257 
2258 static void hinic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2259 				struct rte_eth_rxq_info *qinfo)
2260 {
2261 	struct hinic_rxq  *rxq = dev->data->rx_queues[queue_id];
2262 
2263 	qinfo->mp = rxq->mb_pool;
2264 	qinfo->nb_desc = rxq->q_depth;
2265 }
2266 
2267 static void hinic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2268 				struct rte_eth_txq_info *qinfo)
2269 {
2270 	struct hinic_txq  *txq = dev->data->tx_queues[queue_id];
2271 
2272 	qinfo->nb_desc = txq->q_depth;
2273 }
2274 
2275 /**
2276  * DPDK callback to retrieve names of extended device statistics.
2277  *
2278  * @param dev
2279  *   Pointer to Ethernet device structure.
2280  * @param xstats_names
2281  *   Buffer to insert names into.
2282  *
2283  * @return
2284  *   Number of xstats names.
2285  */
2286 static int hinic_dev_xstats_get_names(struct rte_eth_dev *dev,
2287 			       struct rte_eth_xstat_name *xstats_names,
2288 			       __rte_unused unsigned int limit)
2289 {
2290 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2291 	int count = 0;
2292 	u16 i = 0, q_num;
2293 
2294 	if (xstats_names == NULL)
2295 		return hinic_xstats_calc_num(nic_dev);
2296 
2297 	/* get pmd rxq stats */
2298 	for (q_num = 0; q_num < nic_dev->num_rq; q_num++) {
2299 		for (i = 0; i < HINIC_RXQ_XSTATS_NUM; i++) {
2300 			snprintf(xstats_names[count].name,
2301 				 sizeof(xstats_names[count].name),
2302 				 "rxq%d_%s_pmd",
2303 				 q_num, hinic_rxq_stats_strings[i].name);
2304 			count++;
2305 		}
2306 	}
2307 
2308 	/* get pmd txq stats */
2309 	for (q_num = 0; q_num < nic_dev->num_sq; q_num++) {
2310 		for (i = 0; i < HINIC_TXQ_XSTATS_NUM; i++) {
2311 			snprintf(xstats_names[count].name,
2312 				 sizeof(xstats_names[count].name),
2313 				 "txq%d_%s_pmd",
2314 				 q_num, hinic_txq_stats_strings[i].name);
2315 			count++;
2316 		}
2317 	}
2318 
2319 	/* get vport stats */
2320 	for (i = 0; i < HINIC_VPORT_XSTATS_NUM; i++) {
2321 		snprintf(xstats_names[count].name,
2322 			 sizeof(xstats_names[count].name),
2323 			 "%s", hinic_vport_stats_strings[i].name);
2324 		count++;
2325 	}
2326 
2327 	if (HINIC_IS_VF(nic_dev->hwdev))
2328 		return count;
2329 
2330 	/* get phy port stats */
2331 	for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) {
2332 		snprintf(xstats_names[count].name,
2333 			 sizeof(xstats_names[count].name),
2334 			 "%s", hinic_phyport_stats_strings[i].name);
2335 		count++;
2336 	}
2337 
2338 	return count;
2339 }
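
/*
 * Usage sketch (illustrative only, not part of this driver): the two
 * xstats callbacks are intended to be used as a pair; the application
 * probes the required array size first, then fetches names and values
 * whose entries are matched through the id field:
 *
 *	int nb = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = calloc(nb, sizeof(*names));
 *	struct rte_eth_xstat *xstats = calloc(nb, sizeof(*xstats));
 *	int i;
 *
 *	rte_eth_xstats_get_names(port_id, names, nb);
 *	rte_eth_xstats_get(port_id, xstats, nb);
 *	for (i = 0; i < nb; i++)
 *		printf("%s: %" PRIu64 "\n",
 *		       names[xstats[i].id].name, xstats[i].value);
 */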
2340 
2341 /**
2342  * DPDK callback to set the default MAC address.
2343  *
2344  * @param dev
2345  *   Pointer to Ethernet device structure.
2346  * @param addr
2347  *   Pointer to the new default MAC address.
2348  * @return
2349  *   0 on success, negative error value otherwise.
2350  */
2351 static int hinic_set_mac_addr(struct rte_eth_dev *dev,
2352 			      struct rte_ether_addr *addr)
2353 {
2354 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2355 	u16 func_id;
2356 	int err;
2357 
2358 	func_id = hinic_global_func_id(nic_dev->hwdev);
2359 	err = hinic_update_mac(nic_dev->hwdev, nic_dev->default_addr.addr_bytes,
2360 			       addr->addr_bytes, 0, func_id);
2361 	if (err)
2362 		return err;
2363 
2364 	rte_ether_addr_copy(addr, &nic_dev->default_addr);
2365 
2366 	PMD_DRV_LOG(INFO, "Set new mac address " RTE_ETHER_ADDR_PRT_FMT,
2367 		    RTE_ETHER_ADDR_BYTES(addr));
2368 
2369 	return 0;
2370 }
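
/*
 * Usage sketch (illustrative only, not part of this driver): the
 * default MAC address is replaced from an application through
 * rte_eth_dev_default_mac_addr_set(); the address below is just a
 * locally administered example value:
 *
 *	struct rte_ether_addr new_mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	ret = rte_eth_dev_default_mac_addr_set(port_id, &new_mac);
 */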
2371 
2372 /**
2373  * DPDK callback to remove a MAC address.
2374  *
2375  * @param dev
2376  *   Pointer to Ethernet device structure.
2377  * @param index
2378  *   MAC address index; must be less than 128.
2379  */
2380 static void hinic_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
2381 {
2382 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2383 	u16 func_id;
2384 	int ret;
2385 
2386 	if (index >= HINIC_MAX_UC_MAC_ADDRS) {
2387 		PMD_DRV_LOG(INFO, "Remove mac index(%u) is out of range",
2388 			    index);
2389 		return;
2390 	}
2391 
2392 	func_id = hinic_global_func_id(nic_dev->hwdev);
2393 	ret = hinic_del_mac(nic_dev->hwdev,
2394 			    dev->data->mac_addrs[index].addr_bytes, 0, func_id);
2395 	if (ret)
2396 		return;
2397 
2398 	memset(&dev->data->mac_addrs[index], 0, sizeof(struct rte_ether_addr));
2399 }
2400 
2401 /**
2402  * DPDK callback to add a MAC address.
2403  *
2404  * @param dev
2405  *   Pointer to Ethernet device structure.
2406  * @param mac_addr
2407  *   Pointer to MAC address
2408  * @param index
2409  *   MAC address index; must be less than 128.
2410  * @param vmdq
2411  *   VMDq pool index (not used).
2412  *
2413  * @return
2414  *   0 on success, negative error value otherwise.
2415  */
2416 static int hinic_mac_addr_add(struct rte_eth_dev *dev,
2417 			      struct rte_ether_addr *mac_addr, uint32_t index,
2418 			      __rte_unused uint32_t vmdq)
2419 {
2420 	struct hinic_nic_dev  *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2421 	unsigned int i;
2422 	u16 func_id;
2423 	int ret;
2424 
2425 	if (index >= HINIC_MAX_UC_MAC_ADDRS) {
2426 		PMD_DRV_LOG(INFO, "Add mac index(%u) is out of range", index);
2427 		return -EINVAL;
2428 	}
2429 
2430 	/* First, make sure this address isn't already configured. */
2431 	for (i = 0; (i != HINIC_MAX_UC_MAC_ADDRS); ++i) {
2432 		/* Skip this index, it's going to be reconfigured. */
2433 		if (i == index)
2434 			continue;
2435 
2436 		if (memcmp(&dev->data->mac_addrs[i],
2437 			mac_addr, sizeof(*mac_addr)))
2438 			continue;
2439 
2440 		PMD_DRV_LOG(INFO, "MAC address already configured");
2441 		return -EADDRINUSE;
2442 	}
2443 
2444 	func_id = hinic_global_func_id(nic_dev->hwdev);
2445 	ret = hinic_set_mac(nic_dev->hwdev, mac_addr->addr_bytes, 0, func_id);
2446 	if (ret)
2447 		return ret;
2448 
2449 	dev->data->mac_addrs[index] = *mac_addr;
2450 	return 0;
2451 }
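
/*
 * Usage sketch (illustrative only, not part of this driver): extra
 * unicast filters (up to HINIC_MAX_UC_MAC_ADDRS in total) are managed
 * through the generic API; the VMDq pool argument is ignored here:
 *
 *	ret = rte_eth_dev_mac_addr_add(port_id, &extra_mac, 0);
 *	...
 *	ret = rte_eth_dev_mac_addr_remove(port_id, &extra_mac);
 *
 * extra_mac is assumed to be a struct rte_ether_addr filled in by the
 * caller.
 */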
2452 
2453 /**
2454  * DPDK callback to set the list of multicast MAC addresses.
2455  *
2456  * @param dev
2457  *   Pointer to Ethernet device structure.
2458  * @param mc_addr_set
2459  *   Pointer to an array of multicast MAC addresses to set.
2460  * @param nb_mc_addr
2461  *   Number of multicast MAC addresses in the array.
2462  * @return
2463  *   0 on success, negative error value otherwise.
2464  */
2465 static int hinic_set_mc_addr_list(struct rte_eth_dev *dev,
2466 				  struct rte_ether_addr *mc_addr_set,
2467 				  uint32_t nb_mc_addr)
2468 {
2469 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2470 	u16 func_id;
2471 	int ret;
2472 	u32 i;
2473 
2474 	func_id = hinic_global_func_id(nic_dev->hwdev);
2475 
2476 	/* delete the old multicast addresses first */
2477 	hinic_delete_mc_addr_list(nic_dev);
2478 
2479 	if (nb_mc_addr > HINIC_MAX_MC_MAC_ADDRS)
2480 		goto allmulti;
2481 
2482 	for (i = 0; i < nb_mc_addr; i++) {
2483 		ret = hinic_set_mac(nic_dev->hwdev, mc_addr_set[i].addr_bytes,
2484 				    0, func_id);
2485 		/* if adding a mc addr fails, fall back to allmulticast mode */
2486 		if (ret) {
2487 			hinic_delete_mc_addr_list(nic_dev);
2488 			goto allmulti;
2489 		}
2490 
2491 		rte_ether_addr_copy(&mc_addr_set[i], &nic_dev->mc_list[i]);
2492 	}
2493 
2494 	return 0;
2495 
2496 allmulti:
2497 	hinic_dev_allmulticast_enable(dev);
2498 
2499 	return 0;
2500 }
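
/*
 * Usage sketch (illustrative only, not part of this driver): the whole
 * multicast filter list is replaced in a single call, and passing a
 * NULL list clears it:
 *
 *	struct rte_ether_addr mc[2];	// filled in by the caller
 *
 *	ret = rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
 *	ret = rte_eth_dev_set_mc_addr_list(port_id, NULL, 0);
 *
 * As implemented above, the driver silently falls back to
 * allmulticast mode when the list exceeds HINIC_MAX_MC_MAC_ADDRS or a
 * hardware add fails.
 */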
2501 
2502 /**
2503  * DPDK callback to get flow operations
2504  *
2505  * @param dev
2506  *   Pointer to Ethernet device structure.
2507  * @param ops
2508  *   Pointer to operation-specific structure.
2509  *
2510  * @return
2511  *   0 on success, negative error value otherwise.
2512  */
2513 static int hinic_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
2514 				  const struct rte_flow_ops **ops)
2515 {
2516 	*ops = &hinic_flow_ops;
2517 	return 0;
2518 }
2519 
2520 static int hinic_set_default_pause_feature(struct hinic_nic_dev *nic_dev)
2521 {
2522 	struct nic_pause_config pause_config = {0};
2523 	int err;
2524 
2525 	pause_config.auto_neg = 0;
2526 	pause_config.rx_pause = HINIC_DEFAUT_PAUSE_CONFIG;
2527 	pause_config.tx_pause = HINIC_DEFAUT_PAUSE_CONFIG;
2528 
2529 	err = hinic_set_pause_config(nic_dev->hwdev, pause_config);
2530 	if (err)
2531 		return err;
2532 
2533 	nic_dev->pause_set = true;
2534 	nic_dev->nic_pause.auto_neg = pause_config.auto_neg;
2535 	nic_dev->nic_pause.rx_pause = pause_config.rx_pause;
2536 	nic_dev->nic_pause.tx_pause = pause_config.tx_pause;
2537 
2538 	return 0;
2539 }
2540 
2541 static int hinic_set_default_dcb_feature(struct hinic_nic_dev *nic_dev)
2542 {
2543 	u8 up_tc[HINIC_DCB_UP_MAX] = {0};
2544 	u8 up_pgid[HINIC_DCB_UP_MAX] = {0};
2545 	u8 up_bw[HINIC_DCB_UP_MAX] = {0};
2546 	u8 pg_bw[HINIC_DCB_UP_MAX] = {0};
2547 	u8 up_strict[HINIC_DCB_UP_MAX] = {0};
2548 	int i = 0;
2549 
2550 	pg_bw[0] = 100;
2551 	for (i = 0; i < HINIC_DCB_UP_MAX; i++)
2552 		up_bw[i] = 100;
2553 
2554 	return hinic_dcb_set_ets(nic_dev->hwdev, up_tc, pg_bw,
2555 					up_pgid, up_bw, up_strict);
2556 }
2557 
2558 static int hinic_pf_get_default_cos(struct hinic_hwdev *hwdev, u8 *cos_id)
2559 {
2560 	u8 default_cos = 0;
2561 	u8 valid_cos_bitmap;
2562 	u8 i;
2563 
2564 	valid_cos_bitmap = hwdev->cfg_mgmt->svc_cap.valid_cos_bitmap;
2565 	if (!valid_cos_bitmap) {
2566 		PMD_DRV_LOG(ERR, "PF has no cos to support");
2567 		return -EFAULT;
2568 	}
2569 
2570 	for (i = 0; i < NR_MAX_COS; i++) {
2571 		if (valid_cos_bitmap & BIT(i))
2572 			default_cos = i; /* Find max cos id as default cos */
2573 	}
2574 
2575 	*cos_id = default_cos;
2576 
2577 	return 0;
2578 }
2579 
2580 static int hinic_init_default_cos(struct hinic_nic_dev *nic_dev)
2581 {
2582 	u8 cos_id = 0;
2583 	int err;
2584 
2585 	if (!HINIC_IS_VF(nic_dev->hwdev)) {
2586 		err = hinic_pf_get_default_cos(nic_dev->hwdev, &cos_id);
2587 		if (err) {
2588 			PMD_DRV_LOG(ERR, "Get PF default cos failed, err: %d",
2589 				    err);
2590 			return HINIC_ERROR;
2591 		}
2592 	} else {
2593 		err = hinic_vf_get_default_cos(nic_dev->hwdev, &cos_id);
2594 		if (err) {
2595 			PMD_DRV_LOG(ERR, "Get VF default cos failed, err: %d",
2596 				    err);
2597 			return HINIC_ERROR;
2598 		}
2599 	}
2600 
2601 	nic_dev->default_cos = cos_id;
2602 
2603 	PMD_DRV_LOG(INFO, "Default cos %d", nic_dev->default_cos);
2604 
2605 	return 0;
2606 }
2607 
2608 static int hinic_set_default_hw_feature(struct hinic_nic_dev *nic_dev)
2609 {
2610 	int err;
2611 
2612 	err = hinic_init_default_cos(nic_dev);
2613 	if (err)
2614 		return err;
2615 
2616 	if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
2617 		return 0;
2618 
2619 	/* Restore DCB configure to default status */
2620 	err = hinic_set_default_dcb_feature(nic_dev);
2621 	if (err)
2622 		return err;
2623 
2624 	/* Enable global pause; PFC on all UPs is disabled as a result. */
2625 	err = hinic_set_default_pause_feature(nic_dev);
2626 	if (err)
2627 		return err;
2628 
2629 	err = hinic_reset_port_link_cfg(nic_dev->hwdev);
2630 	if (err)
2631 		return err;
2632 
2633 	err = hinic_set_link_status_follow(nic_dev->hwdev,
2634 					   HINIC_LINK_FOLLOW_PORT);
2635 	if (err == HINIC_MGMT_CMD_UNSUPPORTED)
2636 		PMD_DRV_LOG(WARNING, "Setting link status to follow the phy port status is not supported");
2637 	else if (err)
2638 		return err;
2639 
2640 	return hinic_set_anti_attack(nic_dev->hwdev, true);
2641 }
2642 
2643 static int32_t hinic_card_workmode_check(struct hinic_nic_dev *nic_dev)
2644 {
2645 	struct hinic_board_info info = { 0 };
2646 	int rc;
2647 
2648 	if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
2649 		return 0;
2650 
2651 	rc = hinic_get_board_info(nic_dev->hwdev, &info);
2652 	if (rc)
2653 		return rc;
2654 
2655 	return (info.service_mode == HINIC_SERVICE_MODE_NIC ? HINIC_OK :
2656 						HINIC_ERROR);
2657 }
2658 
2659 static int hinic_copy_mempool_init(struct hinic_nic_dev *nic_dev)
2660 {
2661 	nic_dev->cpy_mpool = rte_mempool_lookup(nic_dev->proc_dev_name);
2662 	if (nic_dev->cpy_mpool == NULL) {
2663 		nic_dev->cpy_mpool =
2664 		rte_pktmbuf_pool_create(nic_dev->proc_dev_name,
2665 					HINIC_COPY_MEMPOOL_DEPTH,
2666 					0, 0,
2667 					HINIC_COPY_MBUF_SIZE,
2668 					rte_socket_id());
2669 		if (!nic_dev->cpy_mpool) {
2670 			PMD_DRV_LOG(ERR, "Create copy mempool failed, errno: %d, dev_name: %s",
2671 				    rte_errno, nic_dev->proc_dev_name);
2672 			return -ENOMEM;
2673 		}
2674 	}
2675 
2676 	return 0;
2677 }
2678 
2679 static void hinic_copy_mempool_uninit(struct hinic_nic_dev *nic_dev)
2680 {
2681 	if (nic_dev->cpy_mpool != NULL)
2682 		rte_mempool_free(nic_dev->cpy_mpool);
2683 }
2684 
2685 static int hinic_init_sw_rxtxqs(struct hinic_nic_dev *nic_dev)
2686 {
2687 	u32 txq_size;
2688 	u32 rxq_size;
2689 
2690 	/* allocate software txq array */
2691 	txq_size = nic_dev->nic_cap.max_sqs * sizeof(*nic_dev->txqs);
2692 	nic_dev->txqs = kzalloc_aligned(txq_size, GFP_KERNEL);
2693 	if (!nic_dev->txqs) {
2694 		PMD_DRV_LOG(ERR, "Allocate txqs failed");
2695 		return -ENOMEM;
2696 	}
2697 
2698 	/* allocate software rxq array */
2699 	rxq_size = nic_dev->nic_cap.max_rqs * sizeof(*nic_dev->rxqs);
2700 	nic_dev->rxqs = kzalloc_aligned(rxq_size, GFP_KERNEL);
2701 	if (!nic_dev->rxqs) {
2702 		/* free txqs */
2703 		kfree(nic_dev->txqs);
2704 		nic_dev->txqs = NULL;
2705 
2706 		PMD_DRV_LOG(ERR, "Allocate rxqs failed");
2707 		return -ENOMEM;
2708 	}
2709 
2710 	return HINIC_OK;
2711 }
2712 
2713 static void hinic_deinit_sw_rxtxqs(struct hinic_nic_dev *nic_dev)
2714 {
2715 	kfree(nic_dev->txqs);
2716 	nic_dev->txqs = NULL;
2717 
2718 	kfree(nic_dev->rxqs);
2719 	nic_dev->rxqs = NULL;
2720 }
2721 
2722 static int hinic_nic_dev_create(struct rte_eth_dev *eth_dev)
2723 {
2724 	struct hinic_nic_dev *nic_dev =
2725 				HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
2726 	int rc;
2727 
2728 	nic_dev->hwdev = rte_zmalloc("hinic_hwdev", sizeof(*nic_dev->hwdev),
2729 				     RTE_CACHE_LINE_SIZE);
2730 	if (!nic_dev->hwdev) {
2731 		PMD_DRV_LOG(ERR, "Allocate hinic hwdev memory failed, dev_name: %s",
2732 			    eth_dev->data->name);
2733 		return -ENOMEM;
2734 	}
2735 	nic_dev->hwdev->pcidev_hdl = RTE_ETH_DEV_TO_PCI(eth_dev);
2736 
2737 	/* init osdep */
2738 	rc = hinic_osdep_init(nic_dev->hwdev);
2739 	if (rc) {
2740 		PMD_DRV_LOG(ERR, "Initialize os_dep failed, dev_name: %s",
2741 			    eth_dev->data->name);
2742 		goto init_osdep_fail;
2743 	}
2744 
2745 	/* init_hwif */
2746 	rc = hinic_hwif_res_init(nic_dev->hwdev);
2747 	if (rc) {
2748 		PMD_DRV_LOG(ERR, "Initialize hwif failed, dev_name: %s",
2749 			    eth_dev->data->name);
2750 		goto init_hwif_fail;
2751 	}
2752 
2753 	/* init_cfg_mgmt */
2754 	rc = init_cfg_mgmt(nic_dev->hwdev);
2755 	if (rc) {
2756 		PMD_DRV_LOG(ERR, "Initialize cfg_mgmt failed, dev_name: %s",
2757 			    eth_dev->data->name);
2758 		goto init_cfgmgnt_fail;
2759 	}
2760 
2761 	/* init_aeqs */
2762 	rc = hinic_comm_aeqs_init(nic_dev->hwdev);
2763 	if (rc) {
2764 		PMD_DRV_LOG(ERR, "Initialize aeqs failed, dev_name: %s",
2765 			    eth_dev->data->name);
2766 		goto init_aeqs_fail;
2767 	}
2768 
2769 	/* init_pf_to_mgnt */
2770 	rc = hinic_comm_pf_to_mgmt_init(nic_dev->hwdev);
2771 	if (rc) {
2772 		PMD_DRV_LOG(ERR, "Initialize pf_to_mgmt failed, dev_name: %s",
2773 			    eth_dev->data->name);
2774 		goto init_pf_to_mgmt_fail;
2775 	}
2776 
2777 	/* init mailbox */
2778 	rc = hinic_comm_func_to_func_init(nic_dev->hwdev);
2779 	if (rc) {
2780 		PMD_DRV_LOG(ERR, "Initialize func_to_func failed, dev_name: %s",
2781 			    eth_dev->data->name);
2782 		goto init_func_to_func_fail;
2783 	}
2784 
2785 	rc = hinic_card_workmode_check(nic_dev);
2786 	if (rc) {
2787 		PMD_DRV_LOG(ERR, "Check card workmode failed, dev_name: %s",
2788 			    eth_dev->data->name);
2789 		goto workmode_check_fail;
2790 	}
2791 
2792 	/* do l2nic reset to make chip clear */
2793 	rc = hinic_l2nic_reset(nic_dev->hwdev);
2794 	if (rc) {
2795 		PMD_DRV_LOG(ERR, "Do l2nic reset failed, dev_name: %s",
2796 			    eth_dev->data->name);
2797 		goto l2nic_reset_fail;
2798 	}
2799 
2800 	/* init dma and aeq msix attribute table */
2801 	(void)hinic_init_attr_table(nic_dev->hwdev);
2802 
2803 	/* init_cmdqs */
2804 	rc = hinic_comm_cmdqs_init(nic_dev->hwdev);
2805 	if (rc) {
2806 		PMD_DRV_LOG(ERR, "Initialize cmdq failed, dev_name: %s",
2807 			    eth_dev->data->name);
2808 		goto init_cmdq_fail;
2809 	}
2810 
2811 	/* set hardware state active */
2812 	rc = hinic_activate_hwdev_state(nic_dev->hwdev);
2813 	if (rc) {
2814 		PMD_DRV_LOG(ERR, "Initialize resources state failed, dev_name: %s",
2815 			    eth_dev->data->name);
2816 		goto init_resources_state_fail;
2817 	}
2818 
2819 	/* init_capability */
2820 	rc = hinic_init_capability(nic_dev->hwdev);
2821 	if (rc) {
2822 		PMD_DRV_LOG(ERR, "Initialize capability failed, dev_name: %s",
2823 			    eth_dev->data->name);
2824 		goto init_cap_fail;
2825 	}
2826 
2827 	/* get nic capability */
2828 	if (!hinic_support_nic(nic_dev->hwdev, &nic_dev->nic_cap)) {
2829 		PMD_DRV_LOG(ERR, "Hw doesn't support nic, dev_name: %s",
2830 			    eth_dev->data->name);
2831 		rc = -EINVAL;
2832 		goto nic_check_fail;
2833 	}
2834 
2835 	/* init root cla and function table */
2836 	rc = hinic_init_nicio(nic_dev->hwdev);
2837 	if (rc) {
2838 		PMD_DRV_LOG(ERR, "Initialize nic_io failed, dev_name: %s",
2839 			    eth_dev->data->name);
2840 		goto init_nicio_fail;
2841 	}
2842 
2843 	/* init_software_txrxq */
2844 	rc = hinic_init_sw_rxtxqs(nic_dev);
2845 	if (rc) {
2846 		PMD_DRV_LOG(ERR, "Initialize sw_rxtxqs failed, dev_name: %s",
2847 			    eth_dev->data->name);
2848 		goto init_sw_rxtxqs_fail;
2849 	}
2850 
2851 	rc = hinic_copy_mempool_init(nic_dev);
2852 	if (rc) {
2853 		PMD_DRV_LOG(ERR, "Create copy mempool failed, dev_name: %s",
2854 			 eth_dev->data->name);
2855 		goto init_mpool_fail;
2856 	}
2857 
2858 	/* set hardware feature to default status */
2859 	rc = hinic_set_default_hw_feature(nic_dev);
2860 	if (rc) {
2861 		PMD_DRV_LOG(ERR, "Initialize hardware default features failed, dev_name: %s",
2862 			    eth_dev->data->name);
2863 		goto set_default_hw_feature_fail;
2864 	}
2865 
2866 	return 0;
2867 
2868 set_default_hw_feature_fail:
2869 	hinic_copy_mempool_uninit(nic_dev);
2870 
2871 init_mpool_fail:
2872 	hinic_deinit_sw_rxtxqs(nic_dev);
2873 
2874 init_sw_rxtxqs_fail:
2875 	hinic_deinit_nicio(nic_dev->hwdev);
2876 
2877 nic_check_fail:
2878 init_nicio_fail:
2879 init_cap_fail:
2880 	hinic_deactivate_hwdev_state(nic_dev->hwdev);
2881 
2882 init_resources_state_fail:
2883 	hinic_comm_cmdqs_free(nic_dev->hwdev);
2884 
2885 init_cmdq_fail:
2886 l2nic_reset_fail:
2887 workmode_check_fail:
2888 	hinic_comm_func_to_func_free(nic_dev->hwdev);
2889 
2890 init_func_to_func_fail:
2891 	hinic_comm_pf_to_mgmt_free(nic_dev->hwdev);
2892 
2893 init_pf_to_mgmt_fail:
2894 	hinic_comm_aeqs_free(nic_dev->hwdev);
2895 
2896 init_aeqs_fail:
2897 	free_cfg_mgmt(nic_dev->hwdev);
2898 
2899 init_cfgmgnt_fail:
2900 	hinic_hwif_res_free(nic_dev->hwdev);
2901 
2902 init_hwif_fail:
2903 	hinic_osdep_deinit(nic_dev->hwdev);
2904 
2905 init_osdep_fail:
2906 	rte_free(nic_dev->hwdev);
2907 	nic_dev->hwdev = NULL;
2908 
2909 	return rc;
2910 }
2911 
2912 static void hinic_nic_dev_destroy(struct rte_eth_dev *eth_dev)
2913 {
2914 	struct hinic_nic_dev *nic_dev =
2915 			HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
2916 
2917 	(void)hinic_set_link_status_follow(nic_dev->hwdev,
2918 					   HINIC_LINK_FOLLOW_DEFAULT);
2919 	hinic_copy_mempool_uninit(nic_dev);
2920 	hinic_deinit_sw_rxtxqs(nic_dev);
2921 	hinic_deinit_nicio(nic_dev->hwdev);
2922 	hinic_deactivate_hwdev_state(nic_dev->hwdev);
2923 	hinic_comm_cmdqs_free(nic_dev->hwdev);
2924 	hinic_comm_func_to_func_free(nic_dev->hwdev);
2925 	hinic_comm_pf_to_mgmt_free(nic_dev->hwdev);
2926 	hinic_comm_aeqs_free(nic_dev->hwdev);
2927 	free_cfg_mgmt(nic_dev->hwdev);
2928 	hinic_hwif_res_free(nic_dev->hwdev);
2929 	hinic_osdep_deinit(nic_dev->hwdev);
2930 	rte_free(nic_dev->hwdev);
2931 	nic_dev->hwdev = NULL;
2932 }
2933 
2934 /**
2935  * DPDK callback to close the device.
2936  *
2937  * @param dev
2938  *   Pointer to Ethernet device structure.
2939  */
2940 static int hinic_dev_close(struct rte_eth_dev *dev)
2941 {
2942 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2943 	int ret;
2944 
2945 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2946 		return 0;
2947 
2948 	if (rte_bit_relaxed_test_and_set32(HINIC_DEV_CLOSE,
2949 					   &nic_dev->dev_status)) {
2950 		PMD_DRV_LOG(WARNING, "Device %s already closed",
2951 			    dev->data->name);
2952 		return 0;
2953 	}
2954 
2955 	/* stop device first */
2956 	ret = hinic_dev_stop(dev);
2957 
2958 	/* rx_cqe, rx_info */
2959 	hinic_free_all_rx_resources(dev);
2960 
2961 	/* tx_info */
2962 	hinic_free_all_tx_resources(dev);
2963 
2964 	/* free wq, pi_dma_addr */
2965 	hinic_free_all_rq(nic_dev);
2966 
2967 	/* free wq, db_addr */
2968 	hinic_free_all_sq(nic_dev);
2969 
2970 	/* deinit mac vlan tbl */
2971 	hinic_deinit_mac_addr(dev);
2972 	hinic_remove_all_vlanid(dev);
2973 
2974 	/* disable hardware and uio interrupt */
2975 	hinic_disable_interrupt(dev);
2976 
2977 	/* destroy rx mode mutex */
2978 	hinic_mutex_destroy(&nic_dev->rx_mode_mutex);
2979 
2980 	/* deinit nic hardware device */
2981 	hinic_nic_dev_destroy(dev);
2982 
2983 	return ret;
2984 }
2985 
2986 static const struct eth_dev_ops hinic_pmd_ops = {
2987 	.dev_configure                 = hinic_dev_configure,
2988 	.dev_infos_get                 = hinic_dev_infos_get,
2989 	.fw_version_get                = hinic_fw_version_get,
2990 	.rx_queue_setup                = hinic_rx_queue_setup,
2991 	.tx_queue_setup                = hinic_tx_queue_setup,
2992 	.dev_start                     = hinic_dev_start,
2993 	.dev_set_link_up               = hinic_dev_set_link_up,
2994 	.dev_set_link_down             = hinic_dev_set_link_down,
2995 	.link_update                   = hinic_link_update,
2996 	.rx_queue_release              = hinic_rx_queue_release,
2997 	.tx_queue_release              = hinic_tx_queue_release,
2998 	.dev_stop                      = hinic_dev_stop,
2999 	.dev_close                     = hinic_dev_close,
3000 	.mtu_set                       = hinic_dev_set_mtu,
3001 	.vlan_filter_set               = hinic_vlan_filter_set,
3002 	.vlan_offload_set              = hinic_vlan_offload_set,
3003 	.allmulticast_enable           = hinic_dev_allmulticast_enable,
3004 	.allmulticast_disable          = hinic_dev_allmulticast_disable,
3005 	.promiscuous_enable            = hinic_dev_promiscuous_enable,
3006 	.promiscuous_disable           = hinic_dev_promiscuous_disable,
3007 	.flow_ctrl_get                 = hinic_flow_ctrl_get,
3008 	.flow_ctrl_set                 = hinic_flow_ctrl_set,
3009 	.rss_hash_update               = hinic_rss_hash_update,
3010 	.rss_hash_conf_get             = hinic_rss_conf_get,
3011 	.reta_update                   = hinic_rss_indirtbl_update,
3012 	.reta_query                    = hinic_rss_indirtbl_query,
3013 	.stats_get                     = hinic_dev_stats_get,
3014 	.stats_reset                   = hinic_dev_stats_reset,
3015 	.xstats_get                    = hinic_dev_xstats_get,
3016 	.xstats_reset                  = hinic_dev_xstats_reset,
3017 	.xstats_get_names              = hinic_dev_xstats_get_names,
3018 	.rxq_info_get                  = hinic_rxq_info_get,
3019 	.txq_info_get                  = hinic_txq_info_get,
3020 	.mac_addr_set                  = hinic_set_mac_addr,
3021 	.mac_addr_remove               = hinic_mac_addr_remove,
3022 	.mac_addr_add                  = hinic_mac_addr_add,
3023 	.set_mc_addr_list              = hinic_set_mc_addr_list,
3024 	.flow_ops_get                  = hinic_dev_flow_ops_get,
3025 };
3026 
3027 static const struct eth_dev_ops hinic_pmd_vf_ops = {
3028 	.dev_configure                 = hinic_dev_configure,
3029 	.dev_infos_get                 = hinic_dev_infos_get,
3030 	.fw_version_get                = hinic_fw_version_get,
3031 	.rx_queue_setup                = hinic_rx_queue_setup,
3032 	.tx_queue_setup                = hinic_tx_queue_setup,
3033 	.dev_start                     = hinic_dev_start,
3034 	.link_update                   = hinic_link_update,
3035 	.rx_queue_release              = hinic_rx_queue_release,
3036 	.tx_queue_release              = hinic_tx_queue_release,
3037 	.dev_stop                      = hinic_dev_stop,
3038 	.dev_close                     = hinic_dev_close,
3039 	.mtu_set                       = hinic_dev_set_mtu,
3040 	.vlan_filter_set               = hinic_vlan_filter_set,
3041 	.vlan_offload_set              = hinic_vlan_offload_set,
3042 	.allmulticast_enable           = hinic_dev_allmulticast_enable,
3043 	.allmulticast_disable          = hinic_dev_allmulticast_disable,
3044 	.rss_hash_update               = hinic_rss_hash_update,
3045 	.rss_hash_conf_get             = hinic_rss_conf_get,
3046 	.reta_update                   = hinic_rss_indirtbl_update,
3047 	.reta_query                    = hinic_rss_indirtbl_query,
3048 	.stats_get                     = hinic_dev_stats_get,
3049 	.stats_reset                   = hinic_dev_stats_reset,
3050 	.xstats_get                    = hinic_dev_xstats_get,
3051 	.xstats_reset                  = hinic_dev_xstats_reset,
3052 	.xstats_get_names              = hinic_dev_xstats_get_names,
3053 	.rxq_info_get                  = hinic_rxq_info_get,
3054 	.txq_info_get                  = hinic_txq_info_get,
3055 	.mac_addr_set                  = hinic_set_mac_addr,
3056 	.mac_addr_remove               = hinic_mac_addr_remove,
3057 	.mac_addr_add                  = hinic_mac_addr_add,
3058 	.set_mc_addr_list              = hinic_set_mc_addr_list,
3059 	.flow_ops_get                  = hinic_dev_flow_ops_get,
3060 };
3061 
3062 static const struct eth_dev_ops hinic_dev_sec_ops = {
3063 	.dev_infos_get                 = hinic_dev_infos_get,
3064 };
3065 
3066 static int hinic_func_init(struct rte_eth_dev *eth_dev)
3067 {
3068 	struct rte_pci_device *pci_dev;
3069 	struct rte_ether_addr *eth_addr;
3070 	struct hinic_nic_dev *nic_dev;
3071 	struct hinic_filter_info *filter_info;
3072 	struct hinic_tcam_info *tcam_info;
3073 	u32 mac_size;
3074 	int rc;
3075 
3076 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3077 
3078 	/* EAL is SECONDARY and eth_dev is already created */
3079 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3080 		eth_dev->dev_ops = &hinic_dev_sec_ops;
3081 		PMD_DRV_LOG(INFO, "Initialize %s in secondary process",
3082 			    eth_dev->data->name);
3083 
3084 		return 0;
3085 	}
3086 
3087 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
3088 
3089 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
3090 	memset(nic_dev, 0, sizeof(*nic_dev));
3091 
3092 	snprintf(nic_dev->proc_dev_name,
3093 		 sizeof(nic_dev->proc_dev_name),
3094 		 "hinic-%.4x:%.2x:%.2x.%x",
3095 		 pci_dev->addr.domain, pci_dev->addr.bus,
3096 		 pci_dev->addr.devid, pci_dev->addr.function);
3097 
3098 	/* alloc mac_addrs */
3099 	mac_size = HINIC_MAX_UC_MAC_ADDRS * sizeof(struct rte_ether_addr);
3100 	eth_addr = rte_zmalloc("hinic_mac", mac_size, 0);
3101 	if (!eth_addr) {
3102 		PMD_DRV_LOG(ERR, "Allocate ethernet addresses memory failed, dev_name: %s",
3103 			    eth_dev->data->name);
3104 		rc = -ENOMEM;
3105 		goto eth_addr_fail;
3106 	}
3107 	eth_dev->data->mac_addrs = eth_addr;
3108 
3109 	mac_size = HINIC_MAX_MC_MAC_ADDRS * sizeof(struct rte_ether_addr);
3110 	nic_dev->mc_list = rte_zmalloc("hinic_mc", mac_size, 0);
3111 	if (!nic_dev->mc_list) {
3112 		PMD_DRV_LOG(ERR, "Allocate mcast addresses memory failed, dev_name: %s",
3113 			    eth_dev->data->name);
3114 		rc = -ENOMEM;
3115 		goto mc_addr_fail;
3116 	}
3117 
3118 	/* create hardware nic_device */
3119 	rc = hinic_nic_dev_create(eth_dev);
3120 	if (rc) {
3121 		PMD_DRV_LOG(ERR, "Create nic device failed, dev_name: %s",
3122 			    eth_dev->data->name);
3123 		goto create_nic_dev_fail;
3124 	}
3125 
3126 	if (HINIC_IS_VF(nic_dev->hwdev))
3127 		eth_dev->dev_ops = &hinic_pmd_vf_ops;
3128 	else
3129 		eth_dev->dev_ops = &hinic_pmd_ops;
3130 
3131 	rc = hinic_init_mac_addr(eth_dev);
3132 	if (rc) {
3133 		PMD_DRV_LOG(ERR, "Initialize mac table failed, dev_name: %s",
3134 			    eth_dev->data->name);
3135 		goto init_mac_fail;
3136 	}
3137 
3138 	/* register callback func to eal lib */
3139 	rc = rte_intr_callback_register(&pci_dev->intr_handle,
3140 					hinic_dev_interrupt_handler,
3141 					(void *)eth_dev);
3142 	if (rc) {
3143 		PMD_DRV_LOG(ERR, "Register rte interrupt callback failed, dev_name: %s",
3144 			    eth_dev->data->name);
3145 		goto reg_intr_cb_fail;
3146 	}
3147 
3148 	/* enable uio/vfio intr/eventfd mapping */
3149 	rc = rte_intr_enable(&pci_dev->intr_handle);
3150 	if (rc) {
3151 		PMD_DRV_LOG(ERR, "Enable rte interrupt failed, dev_name: %s",
3152 			    eth_dev->data->name);
3153 		goto enable_intr_fail;
3154 	}
3155 	rte_bit_relaxed_set32(HINIC_DEV_INTR_EN, &nic_dev->dev_status);
3156 
3157 	hinic_mutex_init(&nic_dev->rx_mode_mutex, NULL);
3158 
3159 	/* initialize filter info */
3160 	filter_info = &nic_dev->filter;
3161 	tcam_info = &nic_dev->tcam;
3162 	memset(filter_info, 0, sizeof(struct hinic_filter_info));
3163 	memset(tcam_info, 0, sizeof(struct hinic_tcam_info));
3164 	/* initialize 5tuple filter list */
3165 	TAILQ_INIT(&filter_info->fivetuple_list);
3166 	TAILQ_INIT(&tcam_info->tcam_list);
3167 	TAILQ_INIT(&nic_dev->filter_ntuple_list);
3168 	TAILQ_INIT(&nic_dev->filter_ethertype_list);
3169 	TAILQ_INIT(&nic_dev->filter_fdir_rule_list);
3170 	TAILQ_INIT(&nic_dev->hinic_flow_list);
3171 
3172 	rte_bit_relaxed_set32(HINIC_DEV_INIT, &nic_dev->dev_status);
3173 	PMD_DRV_LOG(INFO, "Initialize %s in primary successfully",
3174 		    eth_dev->data->name);
3175 
3176 	return 0;
3177 
3178 enable_intr_fail:
3179 	(void)rte_intr_callback_unregister(&pci_dev->intr_handle,
3180 					   hinic_dev_interrupt_handler,
3181 					   (void *)eth_dev);
3182 
3183 reg_intr_cb_fail:
3184 	hinic_deinit_mac_addr(eth_dev);
3185 
3186 init_mac_fail:
3187 	eth_dev->dev_ops = NULL;
3188 	hinic_nic_dev_destroy(eth_dev);
3189 
3190 create_nic_dev_fail:
3191 	rte_free(nic_dev->mc_list);
3192 	nic_dev->mc_list = NULL;
3193 
3194 mc_addr_fail:
3195 	rte_free(eth_addr);
3196 	eth_dev->data->mac_addrs = NULL;
3197 
3198 eth_addr_fail:
3199 	PMD_DRV_LOG(ERR, "Initialize %s in primary failed",
3200 		    eth_dev->data->name);
3201 	return rc;
3202 }
3203 
3204 static int hinic_dev_init(struct rte_eth_dev *eth_dev)
3205 {
3206 	struct rte_pci_device *pci_dev;
3207 
3208 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3209 
3210 	PMD_DRV_LOG(INFO, "Initializing pf hinic-%.4x:%.2x:%.2x.%x in %s process",
3211 		    pci_dev->addr.domain, pci_dev->addr.bus,
3212 		    pci_dev->addr.devid, pci_dev->addr.function,
3213 		    (rte_eal_process_type() == RTE_PROC_PRIMARY) ?
3214 		    "primary" : "secondary");
3215 
3216 	/* rte_eth_dev rx_burst and tx_burst */
3217 	eth_dev->rx_pkt_burst = hinic_recv_pkts;
3218 	eth_dev->tx_pkt_burst = hinic_xmit_pkts;
3219 
3220 	return hinic_func_init(eth_dev);
3221 }
3222 
3223 static int hinic_dev_uninit(struct rte_eth_dev *dev)
3224 {
3225 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3226 		return 0;
3227 
3228 	hinic_dev_close(dev);
3229 
3230 	return HINIC_OK;
3231 }
3232 
3233 static struct rte_pci_id pci_id_hinic_map[] = {
3234 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_PRD) },
3235 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_25GE) },
3236 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_100GE) },
3237 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF) },
3238 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF_HV) },
3239 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_DUAL_25GE) },
3240 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_100GE) },
3241 	{.vendor_id = 0},
3242 };
3243 
3244 static int hinic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3245 			   struct rte_pci_device *pci_dev)
3246 {
3247 	return rte_eth_dev_pci_generic_probe(pci_dev,
3248 		sizeof(struct hinic_nic_dev), hinic_dev_init);
3249 }
3250 
3251 static int hinic_pci_remove(struct rte_pci_device *pci_dev)
3252 {
3253 	return rte_eth_dev_pci_generic_remove(pci_dev, hinic_dev_uninit);
3254 }
3255 
3256 static struct rte_pci_driver rte_hinic_pmd = {
3257 	.id_table = pci_id_hinic_map,
3258 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3259 	.probe = hinic_pci_probe,
3260 	.remove = hinic_pci_remove,
3261 };
3262 
3263 RTE_PMD_REGISTER_PCI(net_hinic, rte_hinic_pmd);
3264 RTE_PMD_REGISTER_PCI_TABLE(net_hinic, pci_id_hinic_map);
3265 RTE_LOG_REGISTER_DEFAULT(hinic_logtype, INFO);
3266