1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Huawei Technologies Co., Ltd
3  */
4 
5 #include <rte_pci.h>
6 #include <rte_bus_pci.h>
7 #include <ethdev_pci.h>
8 #include <rte_mbuf.h>
9 #include <rte_malloc.h>
10 #include <rte_memcpy.h>
11 #include <rte_mempool.h>
12 #include <rte_errno.h>
13 #include <rte_ether.h>
14 
15 #include "base/hinic_compat.h"
16 #include "base/hinic_pmd_hwdev.h"
17 #include "base/hinic_pmd_hwif.h"
18 #include "base/hinic_pmd_wq.h"
19 #include "base/hinic_pmd_cfg.h"
20 #include "base/hinic_pmd_mgmt.h"
21 #include "base/hinic_pmd_cmdq.h"
22 #include "base/hinic_pmd_niccfg.h"
23 #include "base/hinic_pmd_nicio.h"
24 #include "base/hinic_pmd_mbox.h"
25 #include "hinic_pmd_ethdev.h"
26 #include "hinic_pmd_tx.h"
27 #include "hinic_pmd_rx.h"
28 
29 /* Vendor ID used by Huawei devices */
30 #define HINIC_HUAWEI_VENDOR_ID		0x19E5
31 
32 /* Hinic devices */
33 #define HINIC_DEV_ID_PRD		0x1822
34 #define HINIC_DEV_ID_VF			0x375E
35 #define HINIC_DEV_ID_VF_HV		0x379E
36 
37 /* Mezz card for Blade Server */
38 #define HINIC_DEV_ID_MEZZ_25GE		0x0210
39 #define HINIC_DEV_ID_MEZZ_100GE		0x0205
40 
41 /* 2*25G and 2*100G card */
42 #define HINIC_DEV_ID_1822_DUAL_25GE	0x0206
43 #define HINIC_DEV_ID_1822_100GE		0x0200
44 
45 #define HINIC_SERVICE_MODE_NIC		2
46 
47 #define HINIC_INTR_CB_UNREG_MAX_RETRIES	10
48 
49 #define DEFAULT_BASE_COS		4
50 #define NR_MAX_COS			8
51 
52 #define HINIC_MIN_RX_BUF_SIZE		1024
53 #define HINIC_MAX_UC_MAC_ADDRS		128
54 #define HINIC_MAX_MC_MAC_ADDRS		2048
55 
56 #define HINIC_DEFAULT_BURST_SIZE	32
57 #define HINIC_DEFAULT_NB_QUEUES		1
58 #define HINIC_DEFAULT_RING_SIZE		1024
59 #define HINIC_MAX_LRO_SIZE		65536
60 
61 /*
62  * vlan_id is a 12-bit number.
63  * The VFTA array is actually a 4096-bit array, i.e. 128 32-bit elements.
64  * 2^5 = 32: the lower 5 bits of vlan_id select the bit within a 32-bit
65  * element, and the upper 7 bits select the VFTA array index.
66  */
67 #define HINIC_VFTA_BIT(vlan_id)    (1 << ((vlan_id) & 0x1F))
68 #define HINIC_VFTA_IDX(vlan_id)    ((vlan_id) >> 5)
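/*
 * Illustrative example: for vlan_id 100, HINIC_VFTA_IDX(100) = 100 >> 5 = 3
 * and HINIC_VFTA_BIT(100) = 1 << (100 & 0x1F) = 1 << 4, so VLAN 100 is
 * tracked by bit 4 of vfta[3].
 */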
69 
70 #define HINIC_VLAN_FILTER_EN		(1U << 0)
71 
72 #define HINIC_MTU_TO_PKTLEN(mtu)	\
73 	((mtu) + ETH_HLEN + ETH_CRC_LEN)
74 
75 #define HINIC_PKTLEN_TO_MTU(pktlen)	\
76 	((pktlen) - (ETH_HLEN + ETH_CRC_LEN))
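/*
 * Illustrative example, assuming the usual 14-byte Ethernet header
 * (ETH_HLEN) and 4-byte CRC (ETH_CRC_LEN): HINIC_MTU_TO_PKTLEN(1500) = 1518
 * and HINIC_PKTLEN_TO_MTU(1518) = 1500.
 */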
77 
78 /* The max frame size with default MTU */
79 #define HINIC_ETH_MAX_LEN (RTE_ETHER_MTU + ETH_HLEN + ETH_CRC_LEN)
80 
81 /* lro number limit for one packet */
82 #define HINIC_LRO_WQE_NUM_DEFAULT	8
83 
84 struct hinic_xstats_name_off {
85 	char name[RTE_ETH_XSTATS_NAME_SIZE];
86 	u32  offset;
87 };
88 
89 #define HINIC_FUNC_STAT(_stat_item) {	\
90 	.name = #_stat_item, \
91 	.offset = offsetof(struct hinic_vport_stats, _stat_item) \
92 }
93 
94 #define HINIC_PORT_STAT(_stat_item) { \
95 	.name = #_stat_item, \
96 	.offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
97 }
98 
99 static const struct hinic_xstats_name_off hinic_vport_stats_strings[] = {
100 	HINIC_FUNC_STAT(tx_unicast_pkts_vport),
101 	HINIC_FUNC_STAT(tx_unicast_bytes_vport),
102 	HINIC_FUNC_STAT(tx_multicast_pkts_vport),
103 	HINIC_FUNC_STAT(tx_multicast_bytes_vport),
104 	HINIC_FUNC_STAT(tx_broadcast_pkts_vport),
105 	HINIC_FUNC_STAT(tx_broadcast_bytes_vport),
106 
107 	HINIC_FUNC_STAT(rx_unicast_pkts_vport),
108 	HINIC_FUNC_STAT(rx_unicast_bytes_vport),
109 	HINIC_FUNC_STAT(rx_multicast_pkts_vport),
110 	HINIC_FUNC_STAT(rx_multicast_bytes_vport),
111 	HINIC_FUNC_STAT(rx_broadcast_pkts_vport),
112 	HINIC_FUNC_STAT(rx_broadcast_bytes_vport),
113 
114 	HINIC_FUNC_STAT(tx_discard_vport),
115 	HINIC_FUNC_STAT(rx_discard_vport),
116 	HINIC_FUNC_STAT(tx_err_vport),
117 	HINIC_FUNC_STAT(rx_err_vport),
118 };
119 
120 #define HINIC_VPORT_XSTATS_NUM (sizeof(hinic_vport_stats_strings) / \
121 		sizeof(hinic_vport_stats_strings[0]))
122 
123 static const struct hinic_xstats_name_off hinic_phyport_stats_strings[] = {
124 	HINIC_PORT_STAT(mac_rx_total_pkt_num),
125 	HINIC_PORT_STAT(mac_rx_total_oct_num),
126 	HINIC_PORT_STAT(mac_rx_bad_pkt_num),
127 	HINIC_PORT_STAT(mac_rx_bad_oct_num),
128 	HINIC_PORT_STAT(mac_rx_good_pkt_num),
129 	HINIC_PORT_STAT(mac_rx_good_oct_num),
130 	HINIC_PORT_STAT(mac_rx_uni_pkt_num),
131 	HINIC_PORT_STAT(mac_rx_multi_pkt_num),
132 	HINIC_PORT_STAT(mac_rx_broad_pkt_num),
133 	HINIC_PORT_STAT(mac_tx_total_pkt_num),
134 	HINIC_PORT_STAT(mac_tx_total_oct_num),
135 	HINIC_PORT_STAT(mac_tx_bad_pkt_num),
136 	HINIC_PORT_STAT(mac_tx_bad_oct_num),
137 	HINIC_PORT_STAT(mac_tx_good_pkt_num),
138 	HINIC_PORT_STAT(mac_tx_good_oct_num),
139 	HINIC_PORT_STAT(mac_tx_uni_pkt_num),
140 	HINIC_PORT_STAT(mac_tx_multi_pkt_num),
141 	HINIC_PORT_STAT(mac_tx_broad_pkt_num),
142 	HINIC_PORT_STAT(mac_rx_fragment_pkt_num),
143 	HINIC_PORT_STAT(mac_rx_undersize_pkt_num),
144 	HINIC_PORT_STAT(mac_rx_undermin_pkt_num),
145 	HINIC_PORT_STAT(mac_rx_64_oct_pkt_num),
146 	HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num),
147 	HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num),
148 	HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num),
149 	HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num),
150 	HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num),
151 	HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num),
152 	HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num),
153 	HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num),
154 	HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num),
155 	HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num),
156 	HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num),
157 	HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num),
158 	HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num),
159 	HINIC_PORT_STAT(mac_rx_oversize_pkt_num),
160 	HINIC_PORT_STAT(mac_rx_jabber_pkt_num),
161 	HINIC_PORT_STAT(mac_rx_mac_pause_num),
162 	HINIC_PORT_STAT(mac_rx_pfc_pkt_num),
163 	HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num),
164 	HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num),
165 	HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num),
166 	HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num),
167 	HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num),
168 	HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num),
169 	HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num),
170 	HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num),
171 	HINIC_PORT_STAT(mac_rx_mac_control_pkt_num),
172 	HINIC_PORT_STAT(mac_rx_sym_err_pkt_num),
173 	HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num),
174 	HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num),
175 	HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num),
176 	HINIC_PORT_STAT(mac_tx_fragment_pkt_num),
177 	HINIC_PORT_STAT(mac_tx_undersize_pkt_num),
178 	HINIC_PORT_STAT(mac_tx_undermin_pkt_num),
179 	HINIC_PORT_STAT(mac_tx_64_oct_pkt_num),
180 	HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num),
181 	HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num),
182 	HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num),
183 	HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num),
184 	HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num),
185 	HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num),
186 	HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num),
187 	HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num),
188 	HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num),
189 	HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num),
190 	HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num),
191 	HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num),
192 	HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num),
193 	HINIC_PORT_STAT(mac_tx_oversize_pkt_num),
194 	HINIC_PORT_STAT(mac_trans_jabber_pkt_num),
195 	HINIC_PORT_STAT(mac_tx_mac_pause_num),
196 	HINIC_PORT_STAT(mac_tx_pfc_pkt_num),
197 	HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num),
198 	HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num),
199 	HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num),
200 	HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num),
201 	HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num),
202 	HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num),
203 	HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num),
204 	HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num),
205 	HINIC_PORT_STAT(mac_tx_mac_control_pkt_num),
206 	HINIC_PORT_STAT(mac_tx_err_all_pkt_num),
207 	HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num),
208 	HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num),
209 };
210 
211 #define HINIC_PHYPORT_XSTATS_NUM (sizeof(hinic_phyport_stats_strings) / \
212 		sizeof(hinic_phyport_stats_strings[0]))
213 
214 static const struct hinic_xstats_name_off hinic_rxq_stats_strings[] = {
215 	{"rx_nombuf", offsetof(struct hinic_rxq_stats, rx_nombuf)},
216 	{"burst_pkt", offsetof(struct hinic_rxq_stats, burst_pkts)},
217 };
218 
219 #define HINIC_RXQ_XSTATS_NUM (sizeof(hinic_rxq_stats_strings) / \
220 		sizeof(hinic_rxq_stats_strings[0]))
221 
222 static const struct hinic_xstats_name_off hinic_txq_stats_strings[] = {
223 	{"tx_busy", offsetof(struct hinic_txq_stats, tx_busy)},
224 	{"offload_errors", offsetof(struct hinic_txq_stats, off_errs)},
225 	{"copy_pkts", offsetof(struct hinic_txq_stats, cpy_pkts)},
226 	{"rl_drop", offsetof(struct hinic_txq_stats, rl_drop)},
227 	{"burst_pkts", offsetof(struct hinic_txq_stats, burst_pkts)},
228 	{"sge_len0", offsetof(struct hinic_txq_stats, sge_len0)},
229 	{"mbuf_null", offsetof(struct hinic_txq_stats, mbuf_null)},
230 };
231 
232 #define HINIC_TXQ_XSTATS_NUM (sizeof(hinic_txq_stats_strings) / \
233 		sizeof(hinic_txq_stats_strings[0]))
234 
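/*
 * Note: the xstats count is the per-function (vport) counters plus the
 * per-queue rxq/txq counters; PFs additionally expose physical-port
 * (phyport) counters, which VFs do not have.
 */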
235 static int hinic_xstats_calc_num(struct hinic_nic_dev *nic_dev)
236 {
237 	if (HINIC_IS_VF(nic_dev->hwdev)) {
238 		return (HINIC_VPORT_XSTATS_NUM +
239 			HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq +
240 			HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq);
241 	} else {
242 		return (HINIC_VPORT_XSTATS_NUM +
243 			HINIC_PHYPORT_XSTATS_NUM +
244 			HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq +
245 			HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq);
246 	}
247 }
248 
249 static const struct rte_eth_desc_lim hinic_rx_desc_lim = {
250 	.nb_max = HINIC_MAX_QUEUE_DEPTH,
251 	.nb_min = HINIC_MIN_QUEUE_DEPTH,
252 	.nb_align = HINIC_RXD_ALIGN,
253 };
254 
255 static const struct rte_eth_desc_lim hinic_tx_desc_lim = {
256 	.nb_max = HINIC_MAX_QUEUE_DEPTH,
257 	.nb_min = HINIC_MIN_QUEUE_DEPTH,
258 	.nb_align = HINIC_TXD_ALIGN,
259 };
260 
261 static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask);
262 
263 /**
264  * Interrupt handler triggered by NIC for handling specific events.
265  *
266  * @param param
267  *   The address of the parameter (struct rte_eth_dev *) registered before.
268  */
269 static void hinic_dev_interrupt_handler(void *param)
270 {
271 	struct rte_eth_dev *dev = param;
272 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
273 
274 	if (!rte_bit_relaxed_get32(HINIC_DEV_INTR_EN, &nic_dev->dev_status)) {
275 		PMD_DRV_LOG(WARNING, "Device's interrupt is disabled, ignore interrupt event, dev_name: %s, port_id: %d",
276 			    nic_dev->proc_dev_name, dev->data->port_id);
277 		return;
278 	}
279 
280 	/* aeq0 msg handler */
281 	hinic_dev_handle_aeq_event(nic_dev->hwdev, param);
282 }
283 
284 /**
285  * Ethernet device configuration.
286  *
287  * Prepare the driver for a given number of TX and RX queues, the MTU size,
288  * and the RSS configuration.
289  *
290  * @param dev
291  *   Pointer to Ethernet device structure.
292  *
293  * @return
294  *   0 on success, negative error value otherwise.
295  */
296 static int hinic_dev_configure(struct rte_eth_dev *dev)
297 {
298 	struct hinic_nic_dev *nic_dev;
299 	struct hinic_nic_io *nic_io;
300 	int err;
301 
302 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
303 	nic_io = nic_dev->hwdev->nic_io;
304 
305 	nic_dev->num_sq =  dev->data->nb_tx_queues;
306 	nic_dev->num_rq = dev->data->nb_rx_queues;
307 
308 	nic_io->num_sqs =  dev->data->nb_tx_queues;
309 	nic_io->num_rqs = dev->data->nb_rx_queues;
310 
311 	/* number of queue pairs is the max of num_sq and num_rq */
312 	nic_dev->num_qps = (nic_dev->num_sq > nic_dev->num_rq) ?
313 			nic_dev->num_sq : nic_dev->num_rq;
314 	nic_io->num_qps = nic_dev->num_qps;
315 
316 	if (nic_dev->num_qps > nic_io->max_qps) {
317 		PMD_DRV_LOG(ERR,
318 			"Queue number out of range, get queue_num:%d, max_queue_num:%d",
319 			nic_dev->num_qps, nic_io->max_qps);
320 		return -EINVAL;
321 	}
322 
323 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
324 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
325 
326 	/* mtu size is 256~9600 */
327 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len < HINIC_MIN_FRAME_SIZE ||
328 	    dev->data->dev_conf.rxmode.max_rx_pkt_len >
329 	    HINIC_MAX_JUMBO_FRAME_SIZE) {
330 		PMD_DRV_LOG(ERR,
331 			"Max rx pkt len out of range, get max_rx_pkt_len:%d, "
332 			"expect between %d and %d",
333 			dev->data->dev_conf.rxmode.max_rx_pkt_len,
334 			HINIC_MIN_FRAME_SIZE, HINIC_MAX_JUMBO_FRAME_SIZE);
335 		return -EINVAL;
336 	}
337 
338 	nic_dev->mtu_size =
339 		HINIC_PKTLEN_TO_MTU(dev->data->dev_conf.rxmode.max_rx_pkt_len);
340 
341 	/* rss template */
342 	err = hinic_config_mq_mode(dev, TRUE);
343 	if (err) {
344 		PMD_DRV_LOG(ERR, "Config multi-queue failed");
345 		return err;
346 	}
347 
348 	/* init vlan offload */
349 	err = hinic_vlan_offload_set(dev,
350 				ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
351 	if (err) {
352 		PMD_DRV_LOG(ERR, "Initialize vlan filter and strip failed");
353 		(void)hinic_config_mq_mode(dev, FALSE);
354 		return err;
355 	}
356 
357 	/* clear fdir filter flag in function table */
358 	hinic_free_fdir_filter(nic_dev);
359 
360 	return HINIC_OK;
361 }
362 
363 /**
364  * DPDK callback to create the receive queue.
365  *
366  * @param dev
367  *   Pointer to Ethernet device structure.
368  * @param queue_idx
369  *   RX queue index.
370  * @param nb_desc
371  *   Number of descriptors for receive queue.
372  * @param socket_id
373  *   NUMA socket on which memory must be allocated.
374  * @param rx_conf
375  *   Thresholds parameters (unused).
376  * @param mp
377  *   Memory pool for buffer allocations.
378  *
379  * @return
380  *   0 on success, negative error value otherwise.
381  */
382 static int hinic_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
383 			 uint16_t nb_desc, unsigned int socket_id,
384 			 __rte_unused const struct rte_eth_rxconf *rx_conf,
385 			 struct rte_mempool *mp)
386 {
387 	int rc;
388 	struct hinic_nic_dev *nic_dev;
389 	struct hinic_hwdev *hwdev;
390 	struct hinic_rxq *rxq;
391 	u16 rq_depth, rx_free_thresh;
392 	u32 buf_size;
393 
394 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
395 	hwdev = nic_dev->hwdev;
396 
397 	/* queue depth must be a power of 2, otherwise it will be rounded up */
398 	rq_depth = (nb_desc & (nb_desc - 1)) ?
399 		((u16)(1U << (ilog2(nb_desc) + 1))) : nb_desc;
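	/* e.g. a request of 1000 descriptors is rounded up to rq_depth 1024 */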
400 
401 	/*
402 	 * Validate number of receive descriptors.
403 	 * It must not exceed hardware maximum and minimum.
404 	 */
405 	if (rq_depth > HINIC_MAX_QUEUE_DEPTH ||
406 		rq_depth < HINIC_MIN_QUEUE_DEPTH) {
407 		PMD_DRV_LOG(ERR, "RX queue depth is out of range from %d to %d, (nb_desc=%d, q_depth=%d, port=%d queue=%d)",
408 			    HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH,
409 			    (int)nb_desc, (int)rq_depth,
410 			    (int)dev->data->port_id, (int)queue_idx);
411 		return -EINVAL;
412 	}
413 
414 	/*
415 	 * The RX descriptor ring will be cleaned after rxq->rx_free_thresh
416 	 * descriptors are used or if the number of descriptors required
417 	 * to receive a packet is greater than the number of free RX
418 	 * descriptors.
419 	 * The following constraints must be satisfied:
420 	 *  rx_free_thresh must be greater than 0.
421 	 *  rx_free_thresh must be less than the size of the ring minus 1.
422 	 * When set to zero use default values.
423 	 */
424 	rx_free_thresh = (u16)((rx_conf->rx_free_thresh) ?
425 			rx_conf->rx_free_thresh : HINIC_DEFAULT_RX_FREE_THRESH);
426 	if (rx_free_thresh >= (rq_depth - 1)) {
427 		PMD_DRV_LOG(ERR, "rx_free_thresh must be less than the number of RX descriptors minus 1. (rx_free_thresh=%u port=%d queue=%d)",
428 			    (unsigned int)rx_free_thresh,
429 			    (int)dev->data->port_id,
430 			    (int)queue_idx);
431 		return -EINVAL;
432 	}
433 
434 	rxq = rte_zmalloc_socket("hinic_rx_queue", sizeof(struct hinic_rxq),
435 				 RTE_CACHE_LINE_SIZE, socket_id);
436 	if (!rxq) {
437 		PMD_DRV_LOG(ERR, "Allocate rxq[%d] failed, dev_name: %s",
438 			    queue_idx, dev->data->name);
439 		return -ENOMEM;
440 	}
441 	nic_dev->rxqs[queue_idx] = rxq;
442 
443 	/* alloc rx rq hw wqe page */
444 	rc = hinic_create_rq(hwdev, queue_idx, rq_depth, socket_id);
445 	if (rc) {
446 		PMD_DRV_LOG(ERR, "Create rxq[%d] failed, dev_name: %s, rq_depth: %d",
447 			    queue_idx, dev->data->name, rq_depth);
448 		goto create_rq_fail;
449 	}
450 
451 	/* mbuf pool must be assigned before setting up rx resources */
452 	rxq->mb_pool = mp;
453 
454 	rc =
455 	hinic_convert_rx_buf_size(rte_pktmbuf_data_room_size(rxq->mb_pool) -
456 				  RTE_PKTMBUF_HEADROOM, &buf_size);
457 	if (rc) {
458 		PMD_DRV_LOG(ERR, "Adjust buf size failed, dev_name: %s",
459 			    dev->data->name);
460 		goto adjust_bufsize_fail;
461 	}
462 
463 	/* rx queue info, rearm control */
464 	rxq->wq = &hwdev->nic_io->rq_wq[queue_idx];
465 	rxq->pi_virt_addr = hwdev->nic_io->qps[queue_idx].rq.pi_virt_addr;
466 	rxq->nic_dev = nic_dev;
467 	rxq->q_id = queue_idx;
468 	rxq->q_depth = rq_depth;
469 	rxq->buf_len = (u16)buf_size;
470 	rxq->rx_free_thresh = rx_free_thresh;
471 	rxq->socket_id = socket_id;
472 
473 	/* the last part of the ring can't do mbuf rearm in bulk */
474 	rxq->rxinfo_align_end = rxq->q_depth - rxq->rx_free_thresh;
475 
476 	/* device port identifier */
477 	rxq->port_id = dev->data->port_id;
478 
479 	/* alloc rx_cqe and prepare rq_wqe */
480 	rc = hinic_setup_rx_resources(rxq);
481 	if (rc) {
482 		PMD_DRV_LOG(ERR, "Setup rxq[%d] rx_resources failed, dev_name: %s",
483 			    queue_idx, dev->data->name);
484 		goto setup_rx_res_err;
485 	}
486 
487 	/* record nic_dev rxq in rte_eth rx_queues */
488 	dev->data->rx_queues[queue_idx] = rxq;
489 
490 	return 0;
491 
492 setup_rx_res_err:
493 adjust_bufsize_fail:
494 	hinic_destroy_rq(hwdev, queue_idx);
495 
496 create_rq_fail:
497 	rte_free(rxq);
498 
499 	return rc;
500 }
501 
502 static void hinic_reset_rx_queue(struct rte_eth_dev *dev)
503 {
504 	struct hinic_rxq *rxq;
505 	struct hinic_nic_dev *nic_dev;
506 	int q_id = 0;
507 
508 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
509 
510 	for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
511 		rxq = dev->data->rx_queues[q_id];
512 
513 		rxq->wq->cons_idx = 0;
514 		rxq->wq->prod_idx = 0;
515 		rxq->wq->delta = rxq->q_depth;
516 		rxq->wq->mask = rxq->q_depth - 1;
517 
518 		/* alloc mbuf to rq */
519 		hinic_rx_alloc_pkts(rxq);
520 	}
521 }
522 
523 /**
524  * DPDK callback to configure the transmit queue.
525  *
526  * @param dev
527  *   Pointer to Ethernet device structure.
528  * @param queue_idx
529  *   Transmit queue index.
530  * @param nb_desc
531  *   Number of descriptors for transmit queue.
532  * @param socket_id
533  *   NUMA socket on which memory must be allocated.
534  * @param tx_conf
535  *   Tx queue configuration parameters.
536  *
537  * @return
538  *   0 on success, negative error value otherwise.
539  */
540 static int hinic_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
541 			 uint16_t nb_desc, unsigned int socket_id,
542 			 __rte_unused const struct rte_eth_txconf *tx_conf)
543 {
544 	int rc;
545 	struct hinic_nic_dev *nic_dev;
546 	struct hinic_hwdev *hwdev;
547 	struct hinic_txq *txq;
548 	u16 sq_depth, tx_free_thresh;
549 
550 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
551 	hwdev = nic_dev->hwdev;
552 
553 	/* queue depth must be a power of 2, otherwise it will be rounded up */
554 	sq_depth = (nb_desc & (nb_desc - 1)) ?
555 			((u16)(1U << (ilog2(nb_desc) + 1))) : nb_desc;
556 
557 	/*
558 	 * Validate number of transmit descriptors.
559 	 * It must not exceed hardware maximum and minimum.
560 	 */
561 	if (sq_depth > HINIC_MAX_QUEUE_DEPTH ||
562 		sq_depth < HINIC_MIN_QUEUE_DEPTH) {
563 		PMD_DRV_LOG(ERR, "TX queue depth is out of range from %d to %d, (nb_desc=%d, q_depth=%d, port=%d queue=%d)",
564 			  HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH,
565 			  (int)nb_desc, (int)sq_depth,
566 			  (int)dev->data->port_id, (int)queue_idx);
567 		return -EINVAL;
568 	}
569 
570 	/*
571 	 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
572 	 * descriptors are used or if the number of descriptors required
573 	 * to transmit a packet is greater than the number of free TX
574 	 * descriptors.
575 	 * The following constraints must be satisfied:
576 	 *  tx_free_thresh must be greater than 0.
577 	 *  tx_free_thresh must be less than the size of the ring minus 1.
578 	 * When set to zero use default values.
579 	 */
580 	tx_free_thresh = (u16)((tx_conf->tx_free_thresh) ?
581 			tx_conf->tx_free_thresh : HINIC_DEFAULT_TX_FREE_THRESH);
582 	if (tx_free_thresh >= (sq_depth - 1)) {
583 		PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the number of TX descriptors minus 1. (tx_free_thresh=%u port=%d queue=%d)",
584 			(unsigned int)tx_free_thresh, (int)dev->data->port_id,
585 			(int)queue_idx);
586 		return -EINVAL;
587 	}
588 
589 	txq = rte_zmalloc_socket("hinic_tx_queue", sizeof(struct hinic_txq),
590 				 RTE_CACHE_LINE_SIZE, socket_id);
591 	if (!txq) {
592 		PMD_DRV_LOG(ERR, "Allocate txq[%d] failed, dev_name: %s",
593 			    queue_idx, dev->data->name);
594 		return -ENOMEM;
595 	}
596 	nic_dev->txqs[queue_idx] = txq;
597 
598 	/* alloc tx sq hw wqe pages */
599 	rc = hinic_create_sq(hwdev, queue_idx, sq_depth, socket_id);
600 	if (rc) {
601 		PMD_DRV_LOG(ERR, "Create txq[%d] failed, dev_name: %s, sq_depth: %d",
602 			    queue_idx, dev->data->name, sq_depth);
603 		goto create_sq_fail;
604 	}
605 
606 	txq->q_id = queue_idx;
607 	txq->q_depth = sq_depth;
608 	txq->port_id = dev->data->port_id;
609 	txq->tx_free_thresh = tx_free_thresh;
610 	txq->nic_dev = nic_dev;
611 	txq->wq = &hwdev->nic_io->sq_wq[queue_idx];
612 	txq->sq = &hwdev->nic_io->qps[queue_idx].sq;
613 	txq->cons_idx_addr = hwdev->nic_io->qps[queue_idx].sq.cons_idx_addr;
614 	txq->sq_head_addr = HINIC_GET_WQ_HEAD(txq);
615 	txq->sq_bot_sge_addr = HINIC_GET_WQ_TAIL(txq) -
616 					sizeof(struct hinic_sq_bufdesc);
617 	txq->cos = nic_dev->default_cos;
618 	txq->socket_id = socket_id;
619 
620 	/* alloc software txinfo */
621 	rc = hinic_setup_tx_resources(txq);
622 	if (rc) {
623 		PMD_DRV_LOG(ERR, "Setup txq[%d] tx_resources failed, dev_name: %s",
624 			    queue_idx, dev->data->name);
625 		goto setup_tx_res_fail;
626 	}
627 
628 	/* record nic_dev txq in rte_eth tx_queues */
629 	dev->data->tx_queues[queue_idx] = txq;
630 
631 	return HINIC_OK;
632 
633 setup_tx_res_fail:
634 	hinic_destroy_sq(hwdev, queue_idx);
635 
636 create_sq_fail:
637 	rte_free(txq);
638 
639 	return rc;
640 }
641 
642 static void hinic_reset_tx_queue(struct rte_eth_dev *dev)
643 {
644 	struct hinic_nic_dev *nic_dev;
645 	struct hinic_txq *txq;
646 	struct hinic_nic_io *nic_io;
647 	struct hinic_hwdev *hwdev;
648 	volatile u32 *ci_addr;
649 	int q_id = 0;
650 
651 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
652 	hwdev = nic_dev->hwdev;
653 	nic_io = hwdev->nic_io;
654 
655 	for (q_id = 0; q_id < nic_dev->num_sq; q_id++) {
656 		txq = dev->data->tx_queues[q_id];
657 
658 		txq->wq->cons_idx = 0;
659 		txq->wq->prod_idx = 0;
660 		txq->wq->delta = txq->q_depth;
661 		txq->wq->mask  = txq->q_depth - 1;
662 
663 		/* clear hardware ci */
664 		ci_addr = (volatile u32 *)HINIC_CI_VADDR(nic_io->ci_vaddr_base,
665 							q_id);
666 		*ci_addr = 0;
667 	}
668 }
669 
670 /**
671  * Get link speed capabilities from the NIC.
672  *
673  * @param dev
674  *   Pointer to Ethernet device structure.
675  * @param speed_capa
676  *   Pointer to the link speed capability bitmap (output).
677  */
678 static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa)
679 {
680 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
681 	u32 supported_link, advertised_link;
682 	int err;
683 
684 #define HINIC_LINK_MODE_SUPPORT_1G	(1U << HINIC_GE_BASE_KX)
685 
686 #define HINIC_LINK_MODE_SUPPORT_10G	(1U << HINIC_10GE_BASE_KR)
687 
688 #define HINIC_LINK_MODE_SUPPORT_25G	((1U << HINIC_25GE_BASE_KR_S) | \
689 					(1U << HINIC_25GE_BASE_CR_S) | \
690 					(1U << HINIC_25GE_BASE_KR) | \
691 					(1U << HINIC_25GE_BASE_CR))
692 
693 #define HINIC_LINK_MODE_SUPPORT_40G	((1U << HINIC_40GE_BASE_KR4) | \
694 					(1U << HINIC_40GE_BASE_CR4))
695 
696 #define HINIC_LINK_MODE_SUPPORT_100G	((1U << HINIC_100GE_BASE_KR4) | \
697 					(1U << HINIC_100GE_BASE_CR4))
698 
699 	err = hinic_get_link_mode(nic_dev->hwdev,
700 				  &supported_link, &advertised_link);
701 	if (err || supported_link == HINIC_SUPPORTED_UNKNOWN ||
702 	    advertised_link == HINIC_SUPPORTED_UNKNOWN) {
703 		PMD_DRV_LOG(WARNING, "Get speed capability info failed, device: %s, port_id: %u",
704 			  nic_dev->proc_dev_name, dev->data->port_id);
705 	} else {
706 		*speed_capa = 0;
707 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_1G))
708 			*speed_capa |= ETH_LINK_SPEED_1G;
709 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_10G))
710 			*speed_capa |= ETH_LINK_SPEED_10G;
711 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_25G))
712 			*speed_capa |= ETH_LINK_SPEED_25G;
713 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_40G))
714 			*speed_capa |= ETH_LINK_SPEED_40G;
715 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_100G))
716 			*speed_capa |= ETH_LINK_SPEED_100G;
717 	}
718 }
719 
720 /**
721  * DPDK callback to get information about the device.
722  *
723  * @param dev
724  *   Pointer to Ethernet device structure.
725  * @param info
726  *   Pointer to Info structure output buffer.
727  */
728 static int
729 hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
730 {
731 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
732 
733 	info->max_rx_queues  = nic_dev->nic_cap.max_rqs;
734 	info->max_tx_queues  = nic_dev->nic_cap.max_sqs;
735 	info->min_rx_bufsize = HINIC_MIN_RX_BUF_SIZE;
736 	info->max_rx_pktlen  = HINIC_MAX_JUMBO_FRAME_SIZE;
737 	info->max_mac_addrs  = HINIC_MAX_UC_MAC_ADDRS;
738 	info->min_mtu = HINIC_MIN_MTU_SIZE;
739 	info->max_mtu = HINIC_MAX_MTU_SIZE;
740 	info->max_lro_pkt_size = HINIC_MAX_LRO_SIZE;
741 
742 	hinic_get_speed_capa(dev, &info->speed_capa);
743 	info->rx_queue_offload_capa = 0;
744 	info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
745 				DEV_RX_OFFLOAD_IPV4_CKSUM |
746 				DEV_RX_OFFLOAD_UDP_CKSUM |
747 				DEV_RX_OFFLOAD_TCP_CKSUM |
748 				DEV_RX_OFFLOAD_VLAN_FILTER |
749 				DEV_RX_OFFLOAD_SCATTER |
750 				DEV_RX_OFFLOAD_JUMBO_FRAME |
751 				DEV_RX_OFFLOAD_TCP_LRO |
752 				DEV_RX_OFFLOAD_RSS_HASH;
753 
754 	info->tx_queue_offload_capa = 0;
755 	info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
756 				DEV_TX_OFFLOAD_IPV4_CKSUM |
757 				DEV_TX_OFFLOAD_UDP_CKSUM |
758 				DEV_TX_OFFLOAD_TCP_CKSUM |
759 				DEV_TX_OFFLOAD_SCTP_CKSUM |
760 				DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
761 				DEV_TX_OFFLOAD_TCP_TSO |
762 				DEV_TX_OFFLOAD_MULTI_SEGS;
763 
764 	info->hash_key_size = HINIC_RSS_KEY_SIZE;
765 	info->reta_size = HINIC_RSS_INDIR_SIZE;
766 	info->flow_type_rss_offloads = HINIC_RSS_OFFLOAD_ALL;
767 	info->rx_desc_lim = hinic_rx_desc_lim;
768 	info->tx_desc_lim = hinic_tx_desc_lim;
769 
770 	/* Driver-preferred Rx/Tx parameters */
771 	info->default_rxportconf.burst_size = HINIC_DEFAULT_BURST_SIZE;
772 	info->default_txportconf.burst_size = HINIC_DEFAULT_BURST_SIZE;
773 	info->default_rxportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES;
774 	info->default_txportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES;
775 	info->default_rxportconf.ring_size = HINIC_DEFAULT_RING_SIZE;
776 	info->default_txportconf.ring_size = HINIC_DEFAULT_RING_SIZE;
777 
778 	return 0;
779 }
780 
781 static int hinic_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
782 				size_t fw_size)
783 {
784 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
785 	char fw_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0};
786 	int err;
787 
788 	err = hinic_get_mgmt_version(nic_dev->hwdev, fw_ver);
789 	if (err) {
790 		PMD_DRV_LOG(ERR, "Failed to get fw version");
791 		return -EINVAL;
792 	}
793 
794 	if (fw_size < strlen(fw_ver) + 1)
795 		return (strlen(fw_ver) + 1);
796 
797 	snprintf(fw_version, fw_size, "%s", fw_ver);
798 
799 	return 0;
800 }
801 
802 static int hinic_config_rx_mode(struct hinic_nic_dev *nic_dev, u32 rx_mode_ctrl)
803 {
804 	int err;
805 
806 	err = hinic_set_rx_mode(nic_dev->hwdev, rx_mode_ctrl);
807 	if (err) {
808 		PMD_DRV_LOG(ERR, "Failed to set rx mode");
809 		return -EINVAL;
810 	}
811 	nic_dev->rx_mode_status = rx_mode_ctrl;
812 
813 	return 0;
814 }
815 
816 static int hinic_rxtx_configure(struct rte_eth_dev *dev)
817 {
818 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
819 	int err;
820 
821 	/* rx configure: if rss is enabled, init the default rss configuration */
822 	err = hinic_rx_configure(dev);
823 	if (err) {
824 		PMD_DRV_LOG(ERR, "Configure rss failed");
825 		return err;
826 	}
827 
828 	/* rx mode init */
829 	err = hinic_config_rx_mode(nic_dev, HINIC_DEFAULT_RX_MODE);
830 	if (err) {
831 		PMD_DRV_LOG(ERR, "Configure rx_mode:0x%x failed",
832 			HINIC_DEFAULT_RX_MODE);
833 		goto set_rx_mode_fail;
834 	}
835 
836 	return HINIC_OK;
837 
838 set_rx_mode_fail:
839 	hinic_rx_remove_configure(dev);
840 
841 	return err;
842 }
843 
844 static void hinic_remove_rxtx_configure(struct rte_eth_dev *dev)
845 {
846 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
847 
848 	(void)hinic_config_rx_mode(nic_dev, 0);
849 	hinic_rx_remove_configure(dev);
850 }
851 
852 static int hinic_priv_get_dev_link_status(struct hinic_nic_dev *nic_dev,
853 					  struct rte_eth_link *link)
854 {
855 	int rc;
856 	u8 port_link_status = 0;
857 	struct nic_port_info port_link_info;
858 	struct hinic_hwdev *nic_hwdev = nic_dev->hwdev;
859 	uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
860 					ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
861 					ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
862 					ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
863 
864 	rc = hinic_get_link_status(nic_hwdev, &port_link_status);
865 	if (rc)
866 		return rc;
867 
868 	if (!port_link_status) {
869 		link->link_status = ETH_LINK_DOWN;
870 		link->link_speed = 0;
871 		link->link_duplex = ETH_LINK_HALF_DUPLEX;
872 		link->link_autoneg = ETH_LINK_FIXED;
873 		return HINIC_OK;
874 	}
875 
876 	memset(&port_link_info, 0, sizeof(port_link_info));
877 	rc = hinic_get_port_info(nic_hwdev, &port_link_info);
878 	if (rc)
879 		return rc;
880 
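	/* index modulo LINK_SPEED_MAX guards against out-of-range speed values */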
881 	link->link_speed = port_speed[port_link_info.speed % LINK_SPEED_MAX];
882 	link->link_duplex = port_link_info.duplex;
883 	link->link_autoneg = port_link_info.autoneg_state;
884 	link->link_status = port_link_status;
885 
886 	return HINIC_OK;
887 }
888 
889 /**
890  * DPDK callback to retrieve physical link information.
891  *
892  * @param dev
893  *   Pointer to Ethernet device structure.
894  * @param wait_to_complete
895  *   Wait for request completion.
896  *
897  * @return
898  *   0 link status changed, -1 link status not changed
899  */
900 static int hinic_link_update(struct rte_eth_dev *dev, int wait_to_complete)
901 {
902 #define CHECK_INTERVAL 10  /* 10ms */
903 #define MAX_REPEAT_TIME 100  /* 1s (100 * 10ms) in total */
904 	int rc = HINIC_OK;
905 	struct rte_eth_link link;
906 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
907 	unsigned int rep_cnt = MAX_REPEAT_TIME;
908 
909 	memset(&link, 0, sizeof(link));
910 	do {
911 		/* Get link status information from hardware */
912 		rc = hinic_priv_get_dev_link_status(nic_dev, &link);
913 		if (rc != HINIC_OK) {
914 			link.link_speed = ETH_SPEED_NUM_NONE;
915 			link.link_duplex = ETH_LINK_FULL_DUPLEX;
916 			PMD_DRV_LOG(ERR, "Get link status failed");
917 			goto out;
918 		}
919 
920 		if (!wait_to_complete || link.link_status)
921 			break;
922 
923 		rte_delay_ms(CHECK_INTERVAL);
924 	} while (rep_cnt--);
925 
926 out:
927 	rc = rte_eth_linkstatus_set(dev, &link);
928 	return rc;
929 }
930 
931 /**
932  * DPDK callback to bring the link UP.
933  *
934  * @param dev
935  *   Pointer to Ethernet device structure.
936  *
937  * @return
938  *   0 on success, negative errno value on failure.
939  */
940 static int hinic_dev_set_link_up(struct rte_eth_dev *dev)
941 {
942 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
943 	int ret;
944 
945 	/* link status follows the phy port status; bringing the port up enables the pma */
946 	ret = hinic_set_port_enable(nic_dev->hwdev, true);
947 	if (ret)
948 		PMD_DRV_LOG(ERR, "Set mac link up failed, dev_name: %s, port_id: %d",
949 			    nic_dev->proc_dev_name, dev->data->port_id);
950 
951 	return ret;
952 }
953 
954 /**
955  * DPDK callback to bring the link DOWN.
956  *
957  * @param dev
958  *   Pointer to Ethernet device structure.
959  *
960  * @return
961  *   0 on success, negative errno value on failure.
962  */
963 static int hinic_dev_set_link_down(struct rte_eth_dev *dev)
964 {
965 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
966 	int ret;
967 
968 	/* link status follows the phy port status; bringing the port down disables the pma */
969 	ret = hinic_set_port_enable(nic_dev->hwdev, false);
970 	if (ret)
971 		PMD_DRV_LOG(ERR, "Set mac link down failed, dev_name: %s, port_id: %d",
972 			    nic_dev->proc_dev_name, dev->data->port_id);
973 
974 	return ret;
975 }
976 
977 /**
978  * DPDK callback to start the device.
979  *
980  * @param dev
981  *   Pointer to Ethernet device structure.
982  *
983  * @return
984  *   0 on success, negative errno value on failure.
985  */
986 static int hinic_dev_start(struct rte_eth_dev *dev)
987 {
988 	int rc;
989 	char *name;
990 	struct hinic_nic_dev *nic_dev;
991 
992 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
993 	name = dev->data->name;
994 
995 	/* reset rx and tx queue */
996 	hinic_reset_rx_queue(dev);
997 	hinic_reset_tx_queue(dev);
998 
999 	/* get func rx buf size */
1000 	hinic_get_func_rx_buf_size(nic_dev);
1001 
1002 	/* init txq and rxq context */
1003 	rc = hinic_init_qp_ctxts(nic_dev->hwdev);
1004 	if (rc) {
1005 		PMD_DRV_LOG(ERR, "Initialize qp context failed, dev_name: %s",
1006 			    name);
1007 		goto init_qp_fail;
1008 	}
1009 
1010 	/* rss template */
1011 	rc = hinic_config_mq_mode(dev, TRUE);
1012 	if (rc) {
1013 		PMD_DRV_LOG(ERR, "Configure mq mode failed, dev_name: %s",
1014 			    name);
1015 		goto cfg_mq_mode_fail;
1016 	}
1017 
1018 	/* set default mtu */
1019 	rc = hinic_set_port_mtu(nic_dev->hwdev, nic_dev->mtu_size);
1020 	if (rc) {
1021 		PMD_DRV_LOG(ERR, "Set mtu_size[%d] failed, dev_name: %s",
1022 			    nic_dev->mtu_size, name);
1023 		goto set_mtu_fail;
1024 	}
1025 
1026 	/* configure rss, rx mode and other default rx/tx features */
1027 	rc = hinic_rxtx_configure(dev);
1028 	if (rc) {
1029 		PMD_DRV_LOG(ERR, "Configure tx and rx failed, dev_name: %s",
1030 			    name);
1031 		goto cfg_rxtx_fail;
1032 	}
1033 
1034 	/* reactivate pf status so that the uP can report async events */
1035 	hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_ACTIVE_FLAG);
1036 
1037 	/* open virtual port to get ready for packet receiving */
1038 	rc = hinic_set_vport_enable(nic_dev->hwdev, true);
1039 	if (rc) {
1040 		PMD_DRV_LOG(ERR, "Enable vport failed, dev_name:%s", name);
1041 		goto en_vport_fail;
1042 	}
1043 
1044 	/* open physical port and start packet receiving */
1045 	rc = hinic_set_port_enable(nic_dev->hwdev, true);
1046 	if (rc) {
1047 		PMD_DRV_LOG(ERR, "Enable physical port failed, dev_name: %s",
1048 			    name);
1049 		goto en_port_fail;
1050 	}
1051 
1052 	/* update eth_dev link status */
1053 	if (dev->data->dev_conf.intr_conf.lsc != 0)
1054 		(void)hinic_link_update(dev, 0);
1055 
1056 	rte_bit_relaxed_set32(HINIC_DEV_START, &nic_dev->dev_status);
1057 
1058 	return 0;
1059 
1060 en_port_fail:
1061 	(void)hinic_set_vport_enable(nic_dev->hwdev, false);
1062 
1063 en_vport_fail:
1064 	hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_INIT);
1065 
1066 	/* Flush tx and rx chip resources in case the vport was partially enabled despite the reported failure */
1067 	(void)hinic_flush_qp_res(nic_dev->hwdev);
1068 	rte_delay_ms(100);
1069 
1070 	hinic_remove_rxtx_configure(dev);
1071 
1072 cfg_rxtx_fail:
1073 set_mtu_fail:
1074 cfg_mq_mode_fail:
1075 	hinic_free_qp_ctxts(nic_dev->hwdev);
1076 
1077 init_qp_fail:
1078 	hinic_free_all_rx_mbuf(dev);
1079 	hinic_free_all_tx_mbuf(dev);
1080 
1081 	return rc;
1082 }
1083 
1084 /**
1085  * DPDK callback to release the receive queue.
1086  *
1087  * @param queue
1088  *   Generic receive queue pointer.
1089  */
1090 static void hinic_rx_queue_release(void *queue)
1091 {
1092 	struct hinic_rxq *rxq = queue;
1093 	struct hinic_nic_dev *nic_dev;
1094 
1095 	if (!rxq) {
1096 		PMD_DRV_LOG(WARNING, "Rxq is null when release");
1097 		return;
1098 	}
1099 	nic_dev = rxq->nic_dev;
1100 
1101 	/* free rxq_pkt mbuf */
1102 	hinic_free_all_rx_mbufs(rxq);
1103 
1104 	/* free rxq_cqe, rxq_info */
1105 	hinic_free_rx_resources(rxq);
1106 
1107 	/* free root rq wq */
1108 	hinic_destroy_rq(nic_dev->hwdev, rxq->q_id);
1109 
1110 	nic_dev->rxqs[rxq->q_id] = NULL;
1111 
1112 	/* free rxq */
1113 	rte_free(rxq);
1114 }
1115 
1116 /**
1117  * DPDK callback to release the transmit queue.
1118  *
1119  * @param queue
1120  *   Generic transmit queue pointer.
1121  */
1122 static void hinic_tx_queue_release(void *queue)
1123 {
1124 	struct hinic_txq *txq = queue;
1125 	struct hinic_nic_dev *nic_dev;
1126 
1127 	if (!txq) {
1128 		PMD_DRV_LOG(WARNING, "Txq is null when release");
1129 		return;
1130 	}
1131 	nic_dev = txq->nic_dev;
1132 
1133 	/* free txq_pkt mbuf */
1134 	hinic_free_all_tx_mbufs(txq);
1135 
1136 	/* free txq_info */
1137 	hinic_free_tx_resources(txq);
1138 
1139 	/* free root sq wq */
1140 	hinic_destroy_sq(nic_dev->hwdev, txq->q_id);
1141 	nic_dev->txqs[txq->q_id] = NULL;
1142 
1143 	/* free txq */
1144 	rte_free(txq);
1145 }
1146 
1147 static void hinic_free_all_rq(struct hinic_nic_dev *nic_dev)
1148 {
1149 	u16 q_id;
1150 
1151 	for (q_id = 0; q_id < nic_dev->num_rq; q_id++)
1152 		hinic_destroy_rq(nic_dev->hwdev, q_id);
1153 }
1154 
1155 static void hinic_free_all_sq(struct hinic_nic_dev *nic_dev)
1156 {
1157 	u16 q_id;
1158 
1159 	for (q_id = 0; q_id < nic_dev->num_sq; q_id++)
1160 		hinic_destroy_sq(nic_dev->hwdev, q_id);
1161 }
1162 
1163 /**
1164  * DPDK callback to stop the device.
1165  *
1166  * @param dev
1167  *   Pointer to Ethernet device structure.
1168  */
1169 static int hinic_dev_stop(struct rte_eth_dev *dev)
1170 {
1171 	int rc;
1172 	char *name;
1173 	uint16_t port_id;
1174 	struct hinic_nic_dev *nic_dev;
1175 	struct rte_eth_link link;
1176 
1177 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1178 	name = dev->data->name;
1179 	port_id = dev->data->port_id;
1180 
1181 	dev->data->dev_started = 0;
1182 
1183 	if (!rte_bit_relaxed_test_and_clear32(HINIC_DEV_START,
1184 					      &nic_dev->dev_status)) {
1185 		PMD_DRV_LOG(INFO, "Device %s already stopped", name);
1186 		return 0;
1187 	}
1188 
1189 	/* just stop phy port and vport */
1190 	rc = hinic_set_port_enable(nic_dev->hwdev, false);
1191 	if (rc)
1192 		PMD_DRV_LOG(WARNING, "Disable phy port failed, error: %d, dev_name: %s, port_id: %d",
1193 			  rc, name, port_id);
1194 
1195 	rc = hinic_set_vport_enable(nic_dev->hwdev, false);
1196 	if (rc)
1197 		PMD_DRV_LOG(WARNING, "Disable vport failed, error: %d, dev_name: %s, port_id: %d",
1198 			  rc, name, port_id);
1199 
1200 	/* Clear recorded link status */
1201 	memset(&link, 0, sizeof(link));
1202 	(void)rte_eth_linkstatus_set(dev, &link);
1203 
1204 	/* flush pending io request */
1205 	rc = hinic_rx_tx_flush(nic_dev->hwdev);
1206 	if (rc)
1207 		PMD_DRV_LOG(WARNING, "Flush pending io failed, error: %d, dev_name: %s, port_id: %d",
1208 			    rc, name, port_id);
1209 
1210 	/* clean rss table and rx_mode */
1211 	hinic_remove_rxtx_configure(dev);
1212 
1213 	/* clean root context */
1214 	hinic_free_qp_ctxts(nic_dev->hwdev);
1215 
1216 	hinic_destroy_fdir_filter(dev);
1217 
1218 	/* free mbuf */
1219 	hinic_free_all_rx_mbuf(dev);
1220 	hinic_free_all_tx_mbuf(dev);
1221 
1222 	return 0;
1223 }
1224 
1225 static void hinic_disable_interrupt(struct rte_eth_dev *dev)
1226 {
1227 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1228 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1229 	int ret, retries = 0;
1230 
1231 	rte_bit_relaxed_clear32(HINIC_DEV_INTR_EN, &nic_dev->dev_status);
1232 
1233 	/* disable msix interrupt in hardware */
1234 	hinic_set_msix_state(nic_dev->hwdev, 0, HINIC_MSIX_DISABLE);
1235 
1236 	/* disable rte interrupt */
1237 	ret = rte_intr_disable(&pci_dev->intr_handle);
1238 	if (ret)
1239 		PMD_DRV_LOG(ERR, "Disable intr failed: %d", ret);
1240 
1241 	do {
1242 		ret =
1243 		rte_intr_callback_unregister(&pci_dev->intr_handle,
1244 					     hinic_dev_interrupt_handler, dev);
1245 		if (ret >= 0) {
1246 			break;
1247 		} else if (ret == -EAGAIN) {
1248 			rte_delay_ms(100);
1249 			retries++;
1250 		} else {
1251 			PMD_DRV_LOG(ERR, "intr callback unregister failed: %d",
1252 				    ret);
1253 			break;
1254 		}
1255 	} while (retries < HINIC_INTR_CB_UNREG_MAX_RETRIES);
1256 
1257 	if (retries == HINIC_INTR_CB_UNREG_MAX_RETRIES)
1258 		PMD_DRV_LOG(ERR, "Unregister intr callback failed after %d retries",
1259 			    retries);
1260 
1261 	rte_bit_relaxed_clear32(HINIC_DEV_INIT, &nic_dev->dev_status);
1262 }
1263 
1264 static int hinic_set_dev_promiscuous(struct hinic_nic_dev *nic_dev, bool enable)
1265 {
1266 	u32 rx_mode_ctrl;
1267 	int err;
1268 
1269 	err = hinic_mutex_lock(&nic_dev->rx_mode_mutex);
1270 	if (err)
1271 		return err;
1272 
1273 	rx_mode_ctrl = nic_dev->rx_mode_status;
1274 
1275 	if (enable)
1276 		rx_mode_ctrl |= HINIC_RX_MODE_PROMISC;
1277 	else
1278 		rx_mode_ctrl &= (~HINIC_RX_MODE_PROMISC);
1279 
1280 	err = hinic_config_rx_mode(nic_dev, rx_mode_ctrl);
1281 
1282 	(void)hinic_mutex_unlock(&nic_dev->rx_mode_mutex);
1283 
1284 	return err;
1285 }
1286 
1287 /**
1288  * DPDK callback to get device statistics.
1289  *
1290  * @param dev
1291  *   Pointer to Ethernet device structure.
1292  * @param stats
1293  *   Stats structure output buffer.
1294  *
1295  * @return
1296  *   0 on success and stats is filled,
1297  *   negative error value otherwise.
1298  */
1299 static int
1300 hinic_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1301 {
1302 	int i, err, q_num;
1303 	u64 rx_discards_pmd = 0;
1304 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1305 	struct hinic_vport_stats vport_stats;
1306 	struct hinic_rxq	*rxq = NULL;
1307 	struct hinic_rxq_stats rxq_stats;
1308 	struct hinic_txq	*txq = NULL;
1309 	struct hinic_txq_stats txq_stats;
1310 
1311 	err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats);
1312 	if (err) {
1313 		PMD_DRV_LOG(ERR, "Get vport stats from fw failed, nic_dev: %s",
1314 			nic_dev->proc_dev_name);
1315 		return err;
1316 	}
1317 
1318 	dev->data->rx_mbuf_alloc_failed = 0;
1319 
1320 	/* rx queue stats */
1321 	q_num = (nic_dev->num_rq < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
1322 			nic_dev->num_rq : RTE_ETHDEV_QUEUE_STAT_CNTRS;
1323 	for (i = 0; i < q_num; i++) {
1324 		rxq = nic_dev->rxqs[i];
1325 		hinic_rxq_get_stats(rxq, &rxq_stats);
1326 		stats->q_ipackets[i] = rxq_stats.packets;
1327 		stats->q_ibytes[i] = rxq_stats.bytes;
1328 		stats->q_errors[i] = rxq_stats.rx_discards;
1329 
1330 		stats->ierrors += rxq_stats.errors;
1331 		rx_discards_pmd += rxq_stats.rx_discards;
1332 		dev->data->rx_mbuf_alloc_failed += rxq_stats.rx_nombuf;
1333 	}
1334 
1335 	/* tx queue stats */
1336 	q_num = (nic_dev->num_sq < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
1337 		nic_dev->num_sq : RTE_ETHDEV_QUEUE_STAT_CNTRS;
1338 	for (i = 0; i < q_num; i++) {
1339 		txq = nic_dev->txqs[i];
1340 		hinic_txq_get_stats(txq, &txq_stats);
1341 		stats->q_opackets[i] = txq_stats.packets;
1342 		stats->q_obytes[i] = txq_stats.bytes;
1343 		stats->oerrors += (txq_stats.tx_busy + txq_stats.off_errs);
1344 	}
1345 
1346 	/* vport stats */
1347 	stats->oerrors += vport_stats.tx_discard_vport;
1348 
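	/* count firmware-level and PMD-level rx drops as missed packets */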
1349 	stats->imissed = vport_stats.rx_discard_vport + rx_discards_pmd;
1350 
1351 	stats->ipackets = (vport_stats.rx_unicast_pkts_vport +
1352 			vport_stats.rx_multicast_pkts_vport +
1353 			vport_stats.rx_broadcast_pkts_vport -
1354 			rx_discards_pmd);
1355 
1356 	stats->opackets = (vport_stats.tx_unicast_pkts_vport +
1357 			vport_stats.tx_multicast_pkts_vport +
1358 			vport_stats.tx_broadcast_pkts_vport);
1359 
1360 	stats->ibytes = (vport_stats.rx_unicast_bytes_vport +
1361 			vport_stats.rx_multicast_bytes_vport +
1362 			vport_stats.rx_broadcast_bytes_vport);
1363 
1364 	stats->obytes = (vport_stats.tx_unicast_bytes_vport +
1365 			vport_stats.tx_multicast_bytes_vport +
1366 			vport_stats.tx_broadcast_bytes_vport);
1367 	return 0;
1368 }
1369 
1370 /**
1371  * DPDK callback to clear device statistics.
1372  *
1373  * @param dev
1374  *   Pointer to Ethernet device structure.
1375  */
1376 static int hinic_dev_stats_reset(struct rte_eth_dev *dev)
1377 {
1378 	int qid;
1379 	struct hinic_rxq	*rxq = NULL;
1380 	struct hinic_txq	*txq = NULL;
1381 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1382 	int ret;
1383 
1384 	ret = hinic_clear_vport_stats(nic_dev->hwdev);
1385 	if (ret != 0)
1386 		return ret;
1387 
1388 	for (qid = 0; qid < nic_dev->num_rq; qid++) {
1389 		rxq = nic_dev->rxqs[qid];
1390 		hinic_rxq_stats_reset(rxq);
1391 	}
1392 
1393 	for (qid = 0; qid < nic_dev->num_sq; qid++) {
1394 		txq = nic_dev->txqs[qid];
1395 		hinic_txq_stats_reset(txq);
1396 	}
1397 
1398 	return 0;
1399 }
1400 
1401 /**
1402  * DPDK callback to clear device extended statistics.
1403  *
1404  * @param dev
1405  *   Pointer to Ethernet device structure.
1406  */
1407 static int hinic_dev_xstats_reset(struct rte_eth_dev *dev)
1408 {
1409 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1410 	int ret;
1411 
1412 	ret = hinic_dev_stats_reset(dev);
1413 	if (ret != 0)
1414 		return ret;
1415 
1416 	if (hinic_func_type(nic_dev->hwdev) != TYPE_VF) {
1417 		ret = hinic_clear_phy_port_stats(nic_dev->hwdev);
1418 		if (ret != 0)
1419 			return ret;
1420 	}
1421 
1422 	return 0;
1423 }
1424 
1425 static void hinic_gen_random_mac_addr(struct rte_ether_addr *mac_addr)
1426 {
1427 	uint64_t random_value;
1428 
1429 	/* Set Organizationally Unique Identifier (OUI) prefix */
1430 	mac_addr->addr_bytes[0] = 0x00;
1431 	mac_addr->addr_bytes[1] = 0x09;
1432 	mac_addr->addr_bytes[2] = 0xC0;
1433 	/* Force indication of locally assigned MAC address. */
1434 	mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
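	/* byte 0 becomes 0x02, so the address has the form 02:09:C0:xx:xx:xx */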
1435 	/* Generate the last 3 bytes of the MAC address with a random number. */
1436 	random_value = rte_rand();
1437 	memcpy(&mac_addr->addr_bytes[3], &random_value, 3);
1438 }
1439 
1440 /**
1441  * Init mac_vlan table in NIC.
1442  *
1443  * @param dev
1444  *   Pointer to Ethernet device structure.
1445  *
1446  * @return
1447  *   0 on success,
1448  *   negative error value otherwise.
1449  */
1450 static int hinic_init_mac_addr(struct rte_eth_dev *eth_dev)
1451 {
1452 	struct hinic_nic_dev *nic_dev =
1453 				HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
1454 	uint8_t addr_bytes[RTE_ETHER_ADDR_LEN];
1455 	u16 func_id = 0;
1456 	int rc = 0;
1457 
1458 	rc = hinic_get_default_mac(nic_dev->hwdev, addr_bytes);
1459 	if (rc)
1460 		return rc;
1461 
1462 	rte_ether_addr_copy((struct rte_ether_addr *)addr_bytes,
1463 		&eth_dev->data->mac_addrs[0]);
1464 	if (rte_is_zero_ether_addr(&eth_dev->data->mac_addrs[0]))
1465 		hinic_gen_random_mac_addr(&eth_dev->data->mac_addrs[0]);
1466 
1467 	func_id = hinic_global_func_id(nic_dev->hwdev);
1468 	rc = hinic_set_mac(nic_dev->hwdev,
1469 			eth_dev->data->mac_addrs[0].addr_bytes,
1470 			0, func_id);
1471 	if (rc && rc != HINIC_PF_SET_VF_ALREADY)
1472 		return rc;
1473 
1474 	rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
1475 			&nic_dev->default_addr);
1476 
1477 	return 0;
1478 }
1479 
1480 static void hinic_delete_mc_addr_list(struct hinic_nic_dev *nic_dev)
1481 {
1482 	u16 func_id;
1483 	u32 i;
1484 
1485 	func_id = hinic_global_func_id(nic_dev->hwdev);
1486 
1487 	for (i = 0; i < HINIC_MAX_MC_MAC_ADDRS; i++) {
1488 		if (rte_is_zero_ether_addr(&nic_dev->mc_list[i]))
1489 			break;
1490 
1491 		hinic_del_mac(nic_dev->hwdev, nic_dev->mc_list[i].addr_bytes,
1492 			      0, func_id);
1493 		memset(&nic_dev->mc_list[i], 0, sizeof(struct rte_ether_addr));
1494 	}
1495 }
1496 
1497 /**
1498  * Deinit mac_vlan table in NIC.
1499  *
1500  * @param dev
1501  *   Pointer to Ethernet device structure.
1506  */
1507 static void hinic_deinit_mac_addr(struct rte_eth_dev *eth_dev)
1508 {
1509 	struct hinic_nic_dev *nic_dev =
1510 				HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
1511 	u16 func_id = 0;
1512 	int rc;
1513 	int i;
1514 
1515 	func_id = hinic_global_func_id(nic_dev->hwdev);
1516 
1517 	for (i = 0; i < HINIC_MAX_UC_MAC_ADDRS; i++) {
1518 		if (rte_is_zero_ether_addr(&eth_dev->data->mac_addrs[i]))
1519 			continue;
1520 
1521 		rc = hinic_del_mac(nic_dev->hwdev,
1522 				   eth_dev->data->mac_addrs[i].addr_bytes,
1523 				   0, func_id);
1524 		if (rc && rc != HINIC_PF_SET_VF_ALREADY)
1525 			PMD_DRV_LOG(ERR, "Delete mac table failed, dev_name: %s",
1526 				    eth_dev->data->name);
1527 
1528 		memset(&eth_dev->data->mac_addrs[i], 0,
1529 		       sizeof(struct rte_ether_addr));
1530 	}
1531 
1532 	/* delete multicast mac addrs */
1533 	hinic_delete_mc_addr_list(nic_dev);
1534 
1535 	rte_free(nic_dev->mc_list);
1536 
1537 }
1538 
1539 static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
1540 {
1541 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1542 	uint32_t frame_size;
1543 	int ret = 0;
1544 
1545 	PMD_DRV_LOG(INFO, "Set port mtu, port_id: %d, mtu: %d, max_pkt_len: %d",
1546 			dev->data->port_id, mtu, HINIC_MTU_TO_PKTLEN(mtu));
1547 
1548 	if (mtu < HINIC_MIN_MTU_SIZE || mtu > HINIC_MAX_MTU_SIZE) {
1549 		PMD_DRV_LOG(ERR, "Invalid mtu: %d, must between %d and %d",
1550 				mtu, HINIC_MIN_MTU_SIZE, HINIC_MAX_MTU_SIZE);
1551 		return -EINVAL;
1552 	}
1553 
1554 	ret = hinic_set_port_mtu(nic_dev->hwdev, mtu);
1555 	if (ret) {
1556 		PMD_DRV_LOG(ERR, "Set port mtu failed, ret: %d", ret);
1557 		return ret;
1558 	}
1559 
1560 	/* update max frame size */
1561 	frame_size = HINIC_MTU_TO_PKTLEN(mtu);
1562 	if (frame_size > HINIC_ETH_MAX_LEN)
1563 		dev->data->dev_conf.rxmode.offloads |=
1564 			DEV_RX_OFFLOAD_JUMBO_FRAME;
1565 	else
1566 		dev->data->dev_conf.rxmode.offloads &=
1567 			~DEV_RX_OFFLOAD_JUMBO_FRAME;
1568 
1569 	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1570 	nic_dev->mtu_size = mtu;
1571 
1572 	return ret;
1573 }
1574 
1575 static void hinic_store_vlan_filter(struct hinic_nic_dev *nic_dev,
1576 					u16 vlan_id, bool on)
1577 {
1578 	u32 vid_idx, vid_bit;
1579 
1580 	vid_idx = HINIC_VFTA_IDX(vlan_id);
1581 	vid_bit = HINIC_VFTA_BIT(vlan_id);
1582 
1583 	if (on)
1584 		nic_dev->vfta[vid_idx] |= vid_bit;
1585 	else
1586 		nic_dev->vfta[vid_idx] &= ~vid_bit;
1587 }
1588 
1589 static bool hinic_find_vlan_filter(struct hinic_nic_dev *nic_dev,
1590 				uint16_t vlan_id)
1591 {
1592 	u32 vid_idx, vid_bit;
1593 
1594 	vid_idx = HINIC_VFTA_IDX(vlan_id);
1595 	vid_bit = HINIC_VFTA_BIT(vlan_id);
1596 
1597 	return (nic_dev->vfta[vid_idx] & vid_bit) ? TRUE : FALSE;
1598 }
1599 
1600 /**
1601  * DPDK callback to set vlan filter.
1602  *
1603  * @param dev
1604  *   Pointer to Ethernet device structure.
1605  * @param vlan_id
1606  *   VLAN ID to be filtered
1607  * @param enable
1608  *   Non-zero to add the VLAN ID to the filter, zero to remove it
1609  */
1610 static int hinic_vlan_filter_set(struct rte_eth_dev *dev,
1611 				uint16_t vlan_id, int enable)
1612 {
1613 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1614 	int err = 0;
1615 	u16 func_id;
1616 
1617 	if (vlan_id > RTE_ETHER_MAX_VLAN_ID)
1618 		return -EINVAL;
1619 
1620 	func_id = hinic_global_func_id(nic_dev->hwdev);
1621 
1622 	if (enable) {
1623 		/* If vlanid is already set, just return */
1624 		if (hinic_find_vlan_filter(nic_dev, vlan_id)) {
1625 			PMD_DRV_LOG(INFO, "Vlan %u has been added, device: %s",
1626 				  vlan_id, nic_dev->proc_dev_name);
1627 			return 0;
1628 		}
1629 
1630 		err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id,
1631 					    func_id, TRUE);
1632 	} else {
1633 		/* If vlanid can't be found, just return */
1634 		if (!hinic_find_vlan_filter(nic_dev, vlan_id)) {
1635 			PMD_DRV_LOG(INFO, "Vlan %u is not in the vlan filter list, device: %s",
1636 				  vlan_id, nic_dev->proc_dev_name);
1637 			return 0;
1638 		}
1639 
1640 		err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id,
1641 					    func_id, FALSE);
1642 	}
1643 
1644 	if (err) {
1645 		PMD_DRV_LOG(ERR, "%s vlan failed, func_id: %d, vlan_id: %d, err: %d",
1646 		      enable ? "Add" : "Remove", func_id, vlan_id, err);
1647 		return err;
1648 	}
1649 
1650 	hinic_store_vlan_filter(nic_dev, vlan_id, enable);
1651 
1652 	PMD_DRV_LOG(INFO, "%s vlan %u succeed, device: %s",
1653 		  enable ? "Add" : "Remove", vlan_id, nic_dev->proc_dev_name);
1654 	return 0;
1655 }
1656 
1657 /**
1658  * DPDK callback to enable or disable vlan offload.
1659  *
1660  * @param dev
1661  *   Pointer to Ethernet device structure.
1662  * @param mask
1663  *   Bitmask of ETH_VLAN_*_MASK flags selecting which settings to apply
1664  */
1665 static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1666 {
1667 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1668 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1669 	bool on;
1670 	int err;
1671 
1672 	/* Enable or disable VLAN filter */
1673 	if (mask & ETH_VLAN_FILTER_MASK) {
1674 		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ?
1675 			TRUE : FALSE;
1676 		err = hinic_config_vlan_filter(nic_dev->hwdev, on);
1677 		if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
1678 			PMD_DRV_LOG(WARNING,
1679 				"Current matching version does not support vlan filter configuration, device: %s, port_id: %d",
1680 				  nic_dev->proc_dev_name, dev->data->port_id);
1681 		} else if (err) {
1682 			PMD_DRV_LOG(ERR, "Failed to %s vlan filter, device: %s, port_id: %d, err: %d",
1683 				  on ? "enable" : "disable",
1684 				  nic_dev->proc_dev_name,
1685 				  dev->data->port_id, err);
1686 			return err;
1687 		}
1688 
1689 		PMD_DRV_LOG(INFO, "%s vlan filter succeed, device: %s, port_id: %d",
1690 			  on ? "Enable" : "Disable",
1691 			  nic_dev->proc_dev_name, dev->data->port_id);
1692 	}
1693 
1694 	/* Enable or disable VLAN stripping */
1695 	if (mask & ETH_VLAN_STRIP_MASK) {
1696 		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) ?
1697 			TRUE : FALSE;
1698 		err = hinic_set_rx_vlan_offload(nic_dev->hwdev, on);
1699 		if (err) {
1700 			PMD_DRV_LOG(ERR, "Failed to %s vlan strip, device: %s, port_id: %d, err: %d",
1701 				  on ? "enable" : "disable",
1702 				  nic_dev->proc_dev_name,
1703 				  dev->data->port_id, err);
1704 			return err;
1705 		}
1706 
1707 		PMD_DRV_LOG(INFO, "%s vlan strip succeed, device: %s, port_id: %d",
1708 			  on ? "Enable" : "Disable",
1709 			  nic_dev->proc_dev_name, dev->data->port_id);
1710 	}
1711 
1712 	return 0;
1713 }
1714 
1715 static void hinic_remove_all_vlanid(struct rte_eth_dev *eth_dev)
1716 {
1717 	struct hinic_nic_dev *nic_dev =
1718 		HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
1719 	u16 func_id;
1720 	int i;
1721 
1722 	func_id = hinic_global_func_id(nic_dev->hwdev);
1723 	for (i = 0; i <= RTE_ETHER_MAX_VLAN_ID; i++) {
1724 		/* If it can't be found, continue */
1725 		if (!hinic_find_vlan_filter(nic_dev, i))
1726 			continue;
1727 
1728 		(void)hinic_add_remove_vlan(nic_dev->hwdev, i, func_id, FALSE);
1729 		hinic_store_vlan_filter(nic_dev, i, false);
1730 	}
1731 }
1732 
1733 static int hinic_set_dev_allmulticast(struct hinic_nic_dev *nic_dev,
1734 				bool enable)
1735 {
1736 	u32 rx_mode_ctrl;
1737 	int err;
1738 
1739 	err = hinic_mutex_lock(&nic_dev->rx_mode_mutex);
1740 	if (err)
1741 		return err;
1742 
1743 	rx_mode_ctrl = nic_dev->rx_mode_status;
1744 
1745 	if (enable)
1746 		rx_mode_ctrl |= HINIC_RX_MODE_MC_ALL;
1747 	else
1748 		rx_mode_ctrl &= (~HINIC_RX_MODE_MC_ALL);
1749 
1750 	err = hinic_config_rx_mode(nic_dev, rx_mode_ctrl);
1751 
1752 	(void)hinic_mutex_unlock(&nic_dev->rx_mode_mutex);
1753 
1754 	return err;
1755 }
1756 
1757 /**
1758  * DPDK callback to enable allmulticast mode.
1759  *
1760  * @param dev
1761  *   Pointer to Ethernet device structure.
1762  *
1763  * @return
1764  *   0 on success,
1765  *   negative error value otherwise.
1766  */
1767 static int hinic_dev_allmulticast_enable(struct rte_eth_dev *dev)
1768 {
1769 	int ret = HINIC_OK;
1770 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1771 
1772 	ret = hinic_set_dev_allmulticast(nic_dev, true);
1773 	if (ret) {
1774 		PMD_DRV_LOG(ERR, "Enable allmulticast failed, error: %d", ret);
1775 		return ret;
1776 	}
1777 
1778 	PMD_DRV_LOG(INFO, "Enable allmulticast succeeded, nic_dev: %s, port_id: %d",
1779 		nic_dev->proc_dev_name, dev->data->port_id);
1780 	return 0;
1781 }
1782 
1783 /**
1784  * DPDK callback to disable allmulticast mode.
1785  *
1786  * @param dev
1787  *   Pointer to Ethernet device structure.
1788  *
1789  * @return
1790  *   0 on success,
1791  *   negative error value otherwise.
1792  */
1793 static int hinic_dev_allmulticast_disable(struct rte_eth_dev *dev)
1794 {
1795 	int ret = HINIC_OK;
1796 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1797 
1798 	ret = hinic_set_dev_allmulticast(nic_dev, false);
1799 	if (ret) {
1800 		PMD_DRV_LOG(ERR, "Disable allmulticast failed, error: %d", ret);
1801 		return ret;
1802 	}
1803 
1804 	PMD_DRV_LOG(INFO, "Disable allmulticast succeeded, nic_dev: %s, port_id: %d",
1805 		nic_dev->proc_dev_name, dev->data->port_id);
1806 	return 0;
1807 }
1808 
1809 /**
1810  * DPDK callback to enable promiscuous mode.
1811  *
1812  * @param dev
1813  *   Pointer to Ethernet device structure.
1814  *
1815  * @return
1816  *   0 on success,
1817  *   negative error value otherwise.
1818  */
1819 static int hinic_dev_promiscuous_enable(struct rte_eth_dev *dev)
1820 {
1821 	int rc = HINIC_OK;
1822 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1823 
1824 	PMD_DRV_LOG(INFO, "Enable promiscuous, nic_dev: %s, port_id: %d, promisc: %d",
1825 		    nic_dev->proc_dev_name, dev->data->port_id,
1826 		    dev->data->promiscuous);
1827 
1828 	rc = hinic_set_dev_promiscuous(nic_dev, true);
1829 	if (rc)
1830 		PMD_DRV_LOG(ERR, "Enable promiscuous failed");
1831 
1832 	return rc;
1833 }
1834 
1835 /**
1836  * DPDK callback to disable promiscuous mode.
1837  *
1838  * @param dev
1839  *   Pointer to Ethernet device structure.
1840  *
1841  * @return
1842  *   0 on success,
1843  *   negative error value otherwise.
1844  */
1845 static int hinic_dev_promiscuous_disable(struct rte_eth_dev *dev)
1846 {
1847 	int rc = HINIC_OK;
1848 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1849 
1850 	PMD_DRV_LOG(INFO, "Disable promiscuous, nic_dev: %s, port_id: %d, promisc: %d",
1851 		    nic_dev->proc_dev_name, dev->data->port_id,
1852 		    dev->data->promiscuous);
1853 
1854 	rc = hinic_set_dev_promiscuous(nic_dev, false);
1855 	if (rc)
1856 		PMD_DRV_LOG(ERR, "Disable promiscuous failed");
1857 
1858 	return rc;
1859 }
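
/*
 * Illustrative application-side usage (not part of this PMD): the
 * allmulticast and promiscuous callbacks above are reached through the
 * generic ethdev helpers. A minimal sketch, assuming a started hinic port
 * 'port_id'; note that promiscuous mode is only wired up in the PF ops table:
 *
 *	if (rte_eth_promiscuous_enable(port_id) != 0)
 *		printf("enable promiscuous failed\n");
 *	if (rte_eth_allmulticast_enable(port_id) != 0)
 *		printf("enable allmulticast failed\n");
 *
 *	rte_eth_promiscuous_disable(port_id);
 *	rte_eth_allmulticast_disable(port_id);
 */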
1860 
1861 static int hinic_flow_ctrl_get(struct rte_eth_dev *dev,
1862 			struct rte_eth_fc_conf *fc_conf)
1863 {
1864 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1865 	struct nic_pause_config nic_pause;
1866 	int err;
1867 
1868 	memset(&nic_pause, 0, sizeof(nic_pause));
1869 
1870 	err = hinic_get_pause_info(nic_dev->hwdev, &nic_pause);
1871 	if (err)
1872 		return err;
1873 
1874 	if (nic_dev->pause_set || !nic_pause.auto_neg) {
1875 		nic_pause.rx_pause = nic_dev->nic_pause.rx_pause;
1876 		nic_pause.tx_pause = nic_dev->nic_pause.tx_pause;
1877 	}
1878 
1879 	fc_conf->autoneg = nic_pause.auto_neg;
1880 
1881 	if (nic_pause.tx_pause && nic_pause.rx_pause)
1882 		fc_conf->mode = RTE_FC_FULL;
1883 	else if (nic_pause.tx_pause)
1884 		fc_conf->mode = RTE_FC_TX_PAUSE;
1885 	else if (nic_pause.rx_pause)
1886 		fc_conf->mode = RTE_FC_RX_PAUSE;
1887 	else
1888 		fc_conf->mode = RTE_FC_NONE;
1889 
1890 	return 0;
1891 }
1892 
1893 static int hinic_flow_ctrl_set(struct rte_eth_dev *dev,
1894 			struct rte_eth_fc_conf *fc_conf)
1895 {
1896 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1897 	struct nic_pause_config nic_pause;
1898 	int err;
1899 
1900 	nic_pause.auto_neg = fc_conf->autoneg;
1901 
1902 	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
1903 		(fc_conf->mode & RTE_FC_TX_PAUSE))
1904 		nic_pause.tx_pause = true;
1905 	else
1906 		nic_pause.tx_pause = false;
1907 
1908 	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
1909 		(fc_conf->mode & RTE_FC_RX_PAUSE))
1910 		nic_pause.rx_pause = true;
1911 	else
1912 		nic_pause.rx_pause = false;
1913 
1914 	err = hinic_set_pause_config(nic_dev->hwdev, nic_pause);
1915 	if (err)
1916 		return err;
1917 
1918 	nic_dev->pause_set = true;
1919 	nic_dev->nic_pause.auto_neg = nic_pause.auto_neg;
1920 	nic_dev->nic_pause.rx_pause = nic_pause.rx_pause;
1921 	nic_dev->nic_pause.tx_pause = nic_pause.tx_pause;
1922 
1923 	PMD_DRV_LOG(INFO, "Set pause options, tx: %s, rx: %s, auto: %s",
1924 		nic_pause.tx_pause ? "on" : "off",
1925 		nic_pause.rx_pause ? "on" : "off",
1926 		nic_pause.auto_neg ? "on" : "off");
1927 
1928 	return 0;
1929 }
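
/*
 * Illustrative application-side usage (not part of this PMD): link flow
 * control is configured through rte_eth_dev_flow_ctrl_set(), which lands in
 * hinic_flow_ctrl_set() above. A minimal sketch, assuming 'port_id' is a
 * hinic PF port (the VF ops table does not expose flow control):
 *
 *	struct rte_eth_fc_conf fc_conf;
 *
 *	memset(&fc_conf, 0, sizeof(fc_conf));
 *	fc_conf.mode = RTE_FC_FULL;
 *	fc_conf.autoneg = 0;
 *	if (rte_eth_dev_flow_ctrl_set(port_id, &fc_conf) != 0)
 *		printf("set flow control failed\n");
 */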
1930 
1931 /**
1932  * DPDK callback to update the RSS hash key and RSS hash type.
1933  *
1934  * @param dev
1935  *   Pointer to Ethernet device structure.
1936  * @param rss_conf
1937  *   RSS configuration data.
1938  *
1939  * @return
1940  *   0 on success, negative error value otherwise.
1941  */
1942 static int hinic_rss_hash_update(struct rte_eth_dev *dev,
1943 			  struct rte_eth_rss_conf *rss_conf)
1944 {
1945 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1946 	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
1947 	u8 hashkey[HINIC_RSS_KEY_SIZE] = {0};
1948 	u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
1949 	u64 rss_hf = rss_conf->rss_hf;
1950 	struct nic_rss_type rss_type = {0};
1951 	int err = 0;
1952 
1953 	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
1954 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
1955 		return HINIC_OK;
1956 	}
1957 
1958 	if (rss_conf->rss_key_len > HINIC_RSS_KEY_SIZE) {
1959 		PMD_DRV_LOG(ERR, "Invalid rss key, rss_key_len: %d",
1960 			    rss_conf->rss_key_len);
1961 		return HINIC_ERROR;
1962 	}
1963 
1964 	if (rss_conf->rss_key) {
1965 		memcpy(hashkey, rss_conf->rss_key, rss_conf->rss_key_len);
1966 		err = hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx,
1967 						 hashkey);
1968 		if (err) {
1969 			PMD_DRV_LOG(ERR, "Set rss template table failed");
1970 			goto disable_rss;
1971 		}
1972 	}
1973 
1974 	rss_type.ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
1975 	rss_type.tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
1976 	rss_type.ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
1977 	rss_type.ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
1978 	rss_type.tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
1979 	rss_type.tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
1980 	rss_type.udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
1981 	rss_type.udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
1982 
1983 	err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type);
1984 	if (err) {
1985 		PMD_DRV_LOG(ERR, "Set rss type table failed");
1986 		goto disable_rss;
1987 	}
1988 
1989 	return 0;
1990 
1991 disable_rss:
1992 	memset(prio_tc, 0, sizeof(prio_tc));
1993 	(void)hinic_rss_cfg(nic_dev->hwdev, 0, tmpl_idx, 0, prio_tc);
1994 	return err;
1995 }
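
/*
 * Illustrative application-side usage (not part of this PMD): updating the
 * RSS key and hash types at runtime goes through
 * rte_eth_dev_rss_hash_update(), which calls hinic_rss_hash_update() above.
 * A minimal sketch, assuming the port was configured with ETH_MQ_RX_RSS and
 * 'rss_key' points to an application-owned buffer of HINIC_RSS_KEY_SIZE
 * bytes:
 *
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = rss_key,
 *		.rss_key_len = HINIC_RSS_KEY_SIZE,
 *		.rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP,
 *	};
 *
 *	if (rte_eth_dev_rss_hash_update(port_id, &rss_conf) != 0)
 *		printf("rss hash update failed\n");
 */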
1996 
1997 /**
1998  * DPDK callback to get the RSS hash configuration.
1999  *
2000  * @param dev
2001  *   Pointer to Ethernet device structure.
2002  * @param rss_conf
2003  *   RSS configuration data.
2004  *
2005  * @return
2006  *   0 on success, negative error value otherwise.
2007  */
2008 static int hinic_rss_conf_get(struct rte_eth_dev *dev,
2009 		       struct rte_eth_rss_conf *rss_conf)
2010 {
2011 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2012 	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
2013 	u8 hashkey[HINIC_RSS_KEY_SIZE] = {0};
2014 	struct nic_rss_type rss_type = {0};
2015 	int err;
2016 
2017 	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
2018 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
2019 		return HINIC_ERROR;
2020 	}
2021 
2022 	err = hinic_rss_get_template_tbl(nic_dev->hwdev, tmpl_idx, hashkey);
2023 	if (err)
2024 		return err;
2025 
2026 	if (rss_conf->rss_key &&
2027 	    rss_conf->rss_key_len >= HINIC_RSS_KEY_SIZE) {
2028 		memcpy(rss_conf->rss_key, hashkey, sizeof(hashkey));
2029 		rss_conf->rss_key_len = sizeof(hashkey);
2030 	}
2031 
2032 	err = hinic_get_rss_type(nic_dev->hwdev, tmpl_idx, &rss_type);
2033 	if (err)
2034 		return err;
2035 
2036 	rss_conf->rss_hf = 0;
2037 	rss_conf->rss_hf |=  rss_type.ipv4 ?
2038 		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4) : 0;
2039 	rss_conf->rss_hf |=  rss_type.tcp_ipv4 ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
2040 	rss_conf->rss_hf |=  rss_type.ipv6 ?
2041 		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6) : 0;
2042 	rss_conf->rss_hf |=  rss_type.ipv6_ext ? ETH_RSS_IPV6_EX : 0;
2043 	rss_conf->rss_hf |=  rss_type.tcp_ipv6 ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
2044 	rss_conf->rss_hf |=  rss_type.tcp_ipv6_ext ? ETH_RSS_IPV6_TCP_EX : 0;
2045 	rss_conf->rss_hf |=  rss_type.udp_ipv4 ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
2046 	rss_conf->rss_hf |=  rss_type.udp_ipv6 ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
2047 
2048 	return HINIC_OK;
2049 }
2050 
2051 /**
2052  * DPDK callback to update the RSS redirection table.
2053  *
2054  * @param dev
2055  *   Pointer to Ethernet device structure.
2056  * @param reta_conf
2057  *   Pointer to RSS reta configuration data.
2058  * @param reta_size
2059  *   Size of the RETA table.
2060  *
2061  * @return
2062  *   0 on success, negative error value otherwise.
2063  */
2064 static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev,
2065 			      struct rte_eth_rss_reta_entry64 *reta_conf,
2066 			      uint16_t reta_size)
2067 {
2068 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2069 	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
2070 	u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
2071 	u32 indirtbl[NIC_RSS_INDIR_SIZE] = {0};
2072 	int err = 0;
2073 	u16 i = 0;
2074 	u16 idx, shift;
2075 
2076 	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG))
2077 		return HINIC_OK;
2078 
2079 	if (reta_size != NIC_RSS_INDIR_SIZE) {
2080 		PMD_DRV_LOG(ERR, "Invalid reta size, reta_size: %d", reta_size);
2081 		return HINIC_ERROR;
2082 	}
2083 
2084 	err = hinic_rss_get_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl);
2085 	if (err)
2086 		return err;
2087 
2088 	/* update rss indir_tbl */
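	/*
	 * Each rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE (64)
	 * entries: 'idx' selects the 64-entry group and 'shift' the entry
	 * inside it. Only entries whose bit is set in the group's mask are
	 * taken from the caller; the others keep the value just read back
	 * from hardware.
	 */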
2089 	for (i = 0; i < reta_size; i++) {
2090 		idx = i / RTE_RETA_GROUP_SIZE;
2091 		shift = i % RTE_RETA_GROUP_SIZE;
2092 
2093 		if (reta_conf[idx].reta[shift] >= nic_dev->num_rq) {
2094 			PMD_DRV_LOG(ERR, "Invalid reta entry, indirtbl[%d]: %d "
2095 				"exceeds the maximum rxq num: %d", i,
2096 				reta_conf[idx].reta[shift], nic_dev->num_rq);
2097 			return -EINVAL;
2098 		}
2099 
2100 		if (reta_conf[idx].mask & (1ULL << shift))
2101 			indirtbl[i] = reta_conf[idx].reta[shift];
2102 	}
2103 
2104 	err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl);
2105 	if (err)
2106 		goto disable_rss;
2107 
2108 	nic_dev->rss_indir_flag = true;
2109 
2110 	return 0;
2111 
2112 disable_rss:
2113 	memset(prio_tc, 0, sizeof(prio_tc));
2114 	(void)hinic_rss_cfg(nic_dev->hwdev, 0, tmpl_idx, 0, prio_tc);
2115 
2116 	return HINIC_ERROR;
2117 }
2118 
2119 /**
2120  * DPDK callback to get the RSS indirection table.
2121  *
2122  * @param dev
2123  *   Pointer to Ethernet device structure.
2124  * @param reta_conf
2125  *   Pointer to RSS reta configuration data.
2126  * @param reta_size
2127  *   Size of the RETA table.
2128  *
2129  * @return
2130  *   0 on success, negative error value otherwise.
2131  */
2132 static int hinic_rss_indirtbl_query(struct rte_eth_dev *dev,
2133 			     struct rte_eth_rss_reta_entry64 *reta_conf,
2134 			     uint16_t reta_size)
2135 {
2136 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2137 	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
2138 	int err = 0;
2139 	u32 indirtbl[NIC_RSS_INDIR_SIZE] = {0};
2140 	u16 idx, shift;
2141 	u16 i = 0;
2142 
2143 	if (reta_size != NIC_RSS_INDIR_SIZE) {
2144 		PMD_DRV_LOG(ERR, "Invalid reta size, reta_size: %d", reta_size);
2145 		return HINIC_ERROR;
2146 	}
2147 
2148 	err = hinic_rss_get_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl);
2149 	if (err) {
2150 		PMD_DRV_LOG(ERR, "Get rss indirect table failed, error: %d",
2151 			    err);
2152 		return err;
2153 	}
2154 
2155 	for (i = 0; i < reta_size; i++) {
2156 		idx = i / RTE_RETA_GROUP_SIZE;
2157 		shift = i % RTE_RETA_GROUP_SIZE;
2158 		if (reta_conf[idx].mask & (1ULL << shift))
2159 			reta_conf[idx].reta[shift] = (uint16_t)indirtbl[i];
2160 	}
2161 
2162 	return HINIC_OK;
2163 }
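
/*
 * Illustrative application-side usage (not part of this PMD): the RETA
 * callbacks above are driven by rte_eth_dev_rss_reta_update() and
 * rte_eth_dev_rss_reta_query(). A minimal sketch that spreads the table over
 * the first two RX queues, assuming at least two RX queues are configured
 * and 'reta_size' was taken from rte_eth_dev_info_get():
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[reta_size /
 *						  RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < reta_size; i++) {
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i %
 *						RTE_RETA_GROUP_SIZE] = i % 2;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
 */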
2164 
2165 /**
2166  * DPDK callback to get extended device statistics.
2167  *
2168  * @param dev
2169  *   Pointer to Ethernet device.
2170  * @param xstats
2171  *   Pointer to rte extended stats table.
2172  * @param n
2173  *   The size of the stats table.
2174  *
2175  * @return
2176  *   Number of extended stats on success and stats is filled,
2177  *   negative error value otherwise.
2178  */
2179 static int hinic_dev_xstats_get(struct rte_eth_dev *dev,
2180 			 struct rte_eth_xstat *xstats,
2181 			 unsigned int n)
2182 {
2183 	u16 qid = 0;
2184 	u32 i;
2185 	int err, count;
2186 	struct hinic_nic_dev *nic_dev;
2187 	struct hinic_phy_port_stats port_stats;
2188 	struct hinic_vport_stats vport_stats;
2189 	struct hinic_rxq	*rxq = NULL;
2190 	struct hinic_rxq_stats rxq_stats;
2191 	struct hinic_txq	*txq = NULL;
2192 	struct hinic_txq_stats txq_stats;
2193 
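	/*
	 * The xstats array is filled in a fixed order: per-rxq PMD counters,
	 * per-txq PMD counters, vport counters and, for PF only, physical
	 * port counters. This order must match the names reported by
	 * hinic_dev_xstats_get_names() below.
	 */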
2194 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2195 	count = hinic_xstats_calc_num(nic_dev);
2196 	if ((int)n < count)
2197 		return count;
2198 
2199 	count = 0;
2200 
2201 	/* Get stats from hinic_rxq_stats */
2202 	for (qid = 0; qid < nic_dev->num_rq; qid++) {
2203 		rxq = nic_dev->rxqs[qid];
2204 		hinic_rxq_get_stats(rxq, &rxq_stats);
2205 
2206 		for (i = 0; i < HINIC_RXQ_XSTATS_NUM; i++) {
2207 			xstats[count].value =
2208 				*(uint64_t *)(((char *)&rxq_stats) +
2209 				hinic_rxq_stats_strings[i].offset);
2210 			xstats[count].id = count;
2211 			count++;
2212 		}
2213 	}
2214 
2215 	/* Get stats from hinic_txq_stats */
2216 	for (qid = 0; qid < nic_dev->num_sq; qid++) {
2217 		txq = nic_dev->txqs[qid];
2218 		hinic_txq_get_stats(txq, &txq_stats);
2219 
2220 		for (i = 0; i < HINIC_TXQ_XSTATS_NUM; i++) {
2221 			xstats[count].value =
2222 				*(uint64_t *)(((char *)&txq_stats) +
2223 				hinic_txq_stats_strings[i].offset);
2224 			xstats[count].id = count;
2225 			count++;
2226 		}
2227 	}
2228 
2229 	/* Get stats from hinic_vport_stats */
2230 	err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats);
2231 	if (err)
2232 		return err;
2233 
2234 	for (i = 0; i < HINIC_VPORT_XSTATS_NUM; i++) {
2235 		xstats[count].value =
2236 			*(uint64_t *)(((char *)&vport_stats) +
2237 			hinic_vport_stats_strings[i].offset);
2238 		xstats[count].id = count;
2239 		count++;
2240 	}
2241 
2242 	if (HINIC_IS_VF(nic_dev->hwdev))
2243 		return count;
2244 
2245 	/* Get stats from hinic_phy_port_stats */
2246 	err = hinic_get_phy_port_stats(nic_dev->hwdev, &port_stats);
2247 	if (err)
2248 		return err;
2249 
2250 	for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) {
2251 		xstats[count].value = *(uint64_t *)(((char *)&port_stats) +
2252 				hinic_phyport_stats_strings[i].offset);
2253 		xstats[count].id = count;
2254 		count++;
2255 	}
2256 
2257 	return count;
2258 }
2259 
2260 static void hinic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2261 				struct rte_eth_rxq_info *qinfo)
2262 {
2263 	struct hinic_rxq  *rxq = dev->data->rx_queues[queue_id];
2264 
2265 	qinfo->mp = rxq->mb_pool;
2266 	qinfo->nb_desc = rxq->q_depth;
2267 }
2268 
2269 static void hinic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2270 				struct rte_eth_txq_info *qinfo)
2271 {
2272 	struct hinic_txq  *txq = dev->data->tx_queues[queue_id];
2273 
2274 	qinfo->nb_desc = txq->q_depth;
2275 }
2276 
2277 /**
2278  * DPDK callback to retrieve names of extended device statistics
2279  *
2280  * @param dev
2281  *   Pointer to Ethernet device structure.
2282  * @param xstats_names
2283  *   Buffer to insert names into.
2284  *
2285  * @return
2286  *   Number of xstats names.
2287  */
2288 static int hinic_dev_xstats_get_names(struct rte_eth_dev *dev,
2289 			       struct rte_eth_xstat_name *xstats_names,
2290 			       __rte_unused unsigned int limit)
2291 {
2292 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2293 	int count = 0;
2294 	u16 i = 0, q_num;
2295 
2296 	if (xstats_names == NULL)
2297 		return hinic_xstats_calc_num(nic_dev);
2298 
2299 	/* get pmd rxq stats */
2300 	for (q_num = 0; q_num < nic_dev->num_rq; q_num++) {
2301 		for (i = 0; i < HINIC_RXQ_XSTATS_NUM; i++) {
2302 			snprintf(xstats_names[count].name,
2303 				 sizeof(xstats_names[count].name),
2304 				 "rxq%d_%s_pmd",
2305 				 q_num, hinic_rxq_stats_strings[i].name);
2306 			count++;
2307 		}
2308 	}
2309 
2310 	/* get pmd txq stats */
2311 	for (q_num = 0; q_num < nic_dev->num_sq; q_num++) {
2312 		for (i = 0; i < HINIC_TXQ_XSTATS_NUM; i++) {
2313 			snprintf(xstats_names[count].name,
2314 				 sizeof(xstats_names[count].name),
2315 				 "txq%d_%s_pmd",
2316 				 q_num, hinic_txq_stats_strings[i].name);
2317 			count++;
2318 		}
2319 	}
2320 
2321 	/* get vport stats */
2322 	for (i = 0; i < HINIC_VPORT_XSTATS_NUM; i++) {
2323 		snprintf(xstats_names[count].name,
2324 			 sizeof(xstats_names[count].name),
2325 			 "%s", hinic_vport_stats_strings[i].name);
2326 		count++;
2327 	}
2328 
2329 	if (HINIC_IS_VF(nic_dev->hwdev))
2330 		return count;
2331 
2332 	/* get phy port stats */
2333 	for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) {
2334 		snprintf(xstats_names[count].name,
2335 			 sizeof(xstats_names[count].name),
2336 			 "%s", hinic_phyport_stats_strings[i].name);
2337 		count++;
2338 	}
2339 
2340 	return count;
2341 }
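
/*
 * Illustrative application-side usage (not part of this PMD): extended
 * statistics are normally read with the usual two-call pattern, first sizing
 * the arrays and then fetching names and values. A minimal sketch for a
 * valid 'port_id':
 *
 *	int nb = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name names[nb];
 *	struct rte_eth_xstat values[nb];
 *
 *	rte_eth_xstats_get_names(port_id, names, nb);
 *	rte_eth_xstats_get(port_id, values, nb);
 *	for (int i = 0; i < nb; i++)
 *		printf("%s: %" PRIu64 "\n", names[i].name, values[i].value);
 */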
2342 
2343 /**
2344  *  DPDK callback to set mac address
2345  *
2346  * @param dev
2347  *   Pointer to Ethernet device structure.
2348  * @param addr
2349  *   Pointer to mac address
2350  * @return
2351  *   0 on success, negative error value otherwise.
2352  */
2353 static int hinic_set_mac_addr(struct rte_eth_dev *dev,
2354 			      struct rte_ether_addr *addr)
2355 {
2356 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2357 	u16 func_id;
2358 	int err;
2359 
2360 	func_id = hinic_global_func_id(nic_dev->hwdev);
2361 	err = hinic_update_mac(nic_dev->hwdev, nic_dev->default_addr.addr_bytes,
2362 			       addr->addr_bytes, 0, func_id);
2363 	if (err)
2364 		return err;
2365 
2366 	rte_ether_addr_copy(addr, &nic_dev->default_addr);
2367 
2368 	PMD_DRV_LOG(INFO, "Set new mac address %02x:%02x:%02x:%02x:%02x:%02x",
2369 		    addr->addr_bytes[0], addr->addr_bytes[1],
2370 		    addr->addr_bytes[2], addr->addr_bytes[3],
2371 		    addr->addr_bytes[4], addr->addr_bytes[5]);
2372 
2373 	return 0;
2374 }
2375 
2376 /**
2377  * DPDK callback to remove a MAC address.
2378  *
2379  * @param dev
2380  *   Pointer to Ethernet device structure.
2381  * @param index
2382  *   MAC address index, should be less than 128.
2383  */
2384 static void hinic_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
2385 {
2386 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2387 	u16 func_id;
2388 	int ret;
2389 
2390 	if (index >= HINIC_MAX_UC_MAC_ADDRS) {
2391 		PMD_DRV_LOG(INFO, "Remove mac index(%u) is out of range",
2392 			    index);
2393 		return;
2394 	}
2395 
2396 	func_id = hinic_global_func_id(nic_dev->hwdev);
2397 	ret = hinic_del_mac(nic_dev->hwdev,
2398 			    dev->data->mac_addrs[index].addr_bytes, 0, func_id);
2399 	if (ret)
2400 		return;
2401 
2402 	memset(&dev->data->mac_addrs[index], 0, sizeof(struct rte_ether_addr));
2403 }
2404 
2405 /**
2406  * DPDK callback to add a MAC address.
2407  *
2408  * @param dev
2409  *   Pointer to Ethernet device structure.
2410  * @param mac_addr
2411  *   Pointer to MAC address
2412  * @param index
2413  *   MAC address index, should be less than 128.
2414  * @param vmdq
2415  *   VMDq pool index (not used).
2416  *
2417  * @return
2418  *   0 on success, negative error value otherwise.
2419  */
2420 static int hinic_mac_addr_add(struct rte_eth_dev *dev,
2421 			      struct rte_ether_addr *mac_addr, uint32_t index,
2422 			      __rte_unused uint32_t vmdq)
2423 {
2424 	struct hinic_nic_dev  *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2425 	unsigned int i;
2426 	u16 func_id;
2427 	int ret;
2428 
2429 	if (index >= HINIC_MAX_UC_MAC_ADDRS) {
2430 		PMD_DRV_LOG(INFO, "Add mac index(%u) is out of range", index);
2431 		return -EINVAL;
2432 	}
2433 
2434 	/* First, make sure this address isn't already configured. */
2435 	for (i = 0; (i != HINIC_MAX_UC_MAC_ADDRS); ++i) {
2436 		/* Skip this index, it's going to be reconfigured. */
2437 		if (i == index)
2438 			continue;
2439 
2440 		if (memcmp(&dev->data->mac_addrs[i],
2441 			mac_addr, sizeof(*mac_addr)))
2442 			continue;
2443 
2444 		PMD_DRV_LOG(INFO, "MAC address already configured");
2445 		return -EADDRINUSE;
2446 	}
2447 
2448 	func_id = hinic_global_func_id(nic_dev->hwdev);
2449 	ret = hinic_set_mac(nic_dev->hwdev, mac_addr->addr_bytes, 0, func_id);
2450 	if (ret)
2451 		return ret;
2452 
2453 	dev->data->mac_addrs[index] = *mac_addr;
2454 	return 0;
2455 }
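
/*
 * Illustrative application-side usage (not part of this PMD): secondary
 * unicast MAC addresses are managed with the generic ethdev helpers, which
 * call hinic_mac_addr_add()/hinic_mac_addr_remove() above. A minimal sketch
 * with a hypothetical locally administered address:
 *
 *	struct rte_ether_addr mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *	};
 *
 *	if (rte_eth_dev_mac_addr_add(port_id, &mac, 0) != 0)
 *		printf("add mac failed\n");
 *
 *	rte_eth_dev_mac_addr_remove(port_id, &mac);
 */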
2456 
2457 /**
2458  *  DPDK callback to set the list of multicast MAC addresses
2459  *
2460  * @param dev
2461  *   Pointer to Ethernet device structure.
2462  * @param mc_addr_set
2463  *   Pointer to an array of multicast MAC addresses
2464  * @param nb_mc_addr
2465  *   Number of multicast MAC addresses in the array
2466  * @return
2467  *   0 on success, negative error value otherwise.
2468  */
2469 static int hinic_set_mc_addr_list(struct rte_eth_dev *dev,
2470 				  struct rte_ether_addr *mc_addr_set,
2471 				  uint32_t nb_mc_addr)
2472 {
2473 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2474 	u16 func_id;
2475 	int ret;
2476 	u32 i;
2477 
2478 	func_id = hinic_global_func_id(nic_dev->hwdev);
2479 
2480 	/* delete the old multicast addresses first */
2481 	hinic_delete_mc_addr_list(nic_dev);
2482 
2483 	if (nb_mc_addr > HINIC_MAX_MC_MAC_ADDRS)
2484 		goto allmulti;
2485 
2486 	for (i = 0; i < nb_mc_addr; i++) {
2487 		ret = hinic_set_mac(nic_dev->hwdev, mc_addr_set[i].addr_bytes,
2488 				    0, func_id);
2489 		/* if adding a mc addr fails, fall back to allmulticast */
2490 		if (ret) {
2491 			hinic_delete_mc_addr_list(nic_dev);
2492 			goto allmulti;
2493 		}
2494 
2495 		rte_ether_addr_copy(&mc_addr_set[i], &nic_dev->mc_list[i]);
2496 	}
2497 
2498 	return 0;
2499 
2500 allmulti:
2501 	hinic_dev_allmulticast_enable(dev);
2502 
2503 	return 0;
2504 }
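
/*
 * Note that when more than HINIC_MAX_MC_MAC_ADDRS addresses are passed, or
 * programming any entry fails, hinic_set_mc_addr_list() above silently falls
 * back to allmulticast mode and still returns 0. Applications reach it via
 * rte_eth_dev_set_mc_addr_list(port_id, mc_list, nb_mc_addr).
 */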
2505 
2506 /**
2507  * DPDK callback to get flow operations
2508  *
2509  * @param dev
2510  *   Pointer to Ethernet device structure.
2511  * @param ops
2512  *   Pointer to operation-specific structure.
2513  *
2514  * @return
2515  *   0 on success, negative error value otherwise.
2516  */
2517 static int hinic_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
2518 				  const struct rte_flow_ops **ops)
2519 {
2520 	*ops = &hinic_flow_ops;
2521 	return 0;
2522 }
2523 
2524 static int hinic_set_default_pause_feature(struct hinic_nic_dev *nic_dev)
2525 {
2526 	struct nic_pause_config pause_config = {0};
2527 	int err;
2528 
2529 	pause_config.auto_neg = 0;
2530 	pause_config.rx_pause = HINIC_DEFAUT_PAUSE_CONFIG;
2531 	pause_config.tx_pause = HINIC_DEFAUT_PAUSE_CONFIG;
2532 
2533 	err = hinic_set_pause_config(nic_dev->hwdev, pause_config);
2534 	if (err)
2535 		return err;
2536 
2537 	nic_dev->pause_set = true;
2538 	nic_dev->nic_pause.auto_neg = pause_config.auto_neg;
2539 	nic_dev->nic_pause.rx_pause = pause_config.rx_pause;
2540 	nic_dev->nic_pause.tx_pause = pause_config.tx_pause;
2541 
2542 	return 0;
2543 }
2544 
2545 static int hinic_set_default_dcb_feature(struct hinic_nic_dev *nic_dev)
2546 {
2547 	u8 up_tc[HINIC_DCB_UP_MAX] = {0};
2548 	u8 up_pgid[HINIC_DCB_UP_MAX] = {0};
2549 	u8 up_bw[HINIC_DCB_UP_MAX] = {0};
2550 	u8 pg_bw[HINIC_DCB_UP_MAX] = {0};
2551 	u8 up_strict[HINIC_DCB_UP_MAX] = {0};
2552 	int i = 0;
2553 
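	/* Default ETS config: every UP maps to TC0/PG0, PG0 gets 100% of the
	 * bandwidth, all UPs are weighted equally and none is strict priority.
	 */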
2554 	pg_bw[0] = 100;
2555 	for (i = 0; i < HINIC_DCB_UP_MAX; i++)
2556 		up_bw[i] = 100;
2557 
2558 	return hinic_dcb_set_ets(nic_dev->hwdev, up_tc, pg_bw,
2559 					up_pgid, up_bw, up_strict);
2560 }
2561 
2562 static int hinic_pf_get_default_cos(struct hinic_hwdev *hwdev, u8 *cos_id)
2563 {
2564 	u8 default_cos = 0;
2565 	u8 valid_cos_bitmap;
2566 	u8 i;
2567 
2568 	valid_cos_bitmap = hwdev->cfg_mgmt->svc_cap.valid_cos_bitmap;
2569 	if (!valid_cos_bitmap) {
2570 		PMD_DRV_LOG(ERR, "PF has no valid cos to use");
2571 		return -EFAULT;
2572 	}
2573 
2574 	for (i = 0; i < NR_MAX_COS; i++) {
2575 		if (valid_cos_bitmap & BIT(i))
2576 			default_cos = i; /* Find max cos id as default cos */
2577 	}
2578 
2579 	*cos_id = default_cos;
2580 
2581 	return 0;
2582 }
2583 
2584 static int hinic_init_default_cos(struct hinic_nic_dev *nic_dev)
2585 {
2586 	u8 cos_id = 0;
2587 	int err;
2588 
2589 	if (!HINIC_IS_VF(nic_dev->hwdev)) {
2590 		err = hinic_pf_get_default_cos(nic_dev->hwdev, &cos_id);
2591 		if (err) {
2592 			PMD_DRV_LOG(ERR, "Get PF default cos failed, err: %d",
2593 				    err);
2594 			return HINIC_ERROR;
2595 		}
2596 	} else {
2597 		err = hinic_vf_get_default_cos(nic_dev->hwdev, &cos_id);
2598 		if (err) {
2599 			PMD_DRV_LOG(ERR, "Get VF default cos failed, err: %d",
2600 				    err);
2601 			return HINIC_ERROR;
2602 		}
2603 	}
2604 
2605 	nic_dev->default_cos = cos_id;
2606 
2607 	PMD_DRV_LOG(INFO, "Default cos %d", nic_dev->default_cos);
2608 
2609 	return 0;
2610 }
2611 
2612 static int hinic_set_default_hw_feature(struct hinic_nic_dev *nic_dev)
2613 {
2614 	int err;
2615 
2616 	err = hinic_init_default_cos(nic_dev);
2617 	if (err)
2618 		return err;
2619 
2620 	if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
2621 		return 0;
2622 
2623 	/* Restore DCB configure to default status */
2624 	err = hinic_set_default_dcb_feature(nic_dev);
2625 	if (err)
2626 		return err;
2627 
2628 	/* Enable global pause; enabling pause disables per-UP PFC. */
2629 	err = hinic_set_default_pause_feature(nic_dev);
2630 	if (err)
2631 		return err;
2632 
2633 	err = hinic_reset_port_link_cfg(nic_dev->hwdev);
2634 	if (err)
2635 		return err;
2636 
2637 	err = hinic_set_link_status_follow(nic_dev->hwdev,
2638 					   HINIC_LINK_FOLLOW_PORT);
2639 	if (err == HINIC_MGMT_CMD_UNSUPPORTED)
2640 		PMD_DRV_LOG(WARNING, "Setting link status to follow the phy port status is not supported");
2641 	else if (err)
2642 		return err;
2643 
2644 	return hinic_set_anti_attack(nic_dev->hwdev, true);
2645 }
2646 
2647 static int32_t hinic_card_workmode_check(struct hinic_nic_dev *nic_dev)
2648 {
2649 	struct hinic_board_info info = { 0 };
2650 	int rc;
2651 
2652 	if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
2653 		return 0;
2654 
2655 	rc = hinic_get_board_info(nic_dev->hwdev, &info);
2656 	if (rc)
2657 		return rc;
2658 
2659 	return (info.service_mode == HINIC_SERVICE_MODE_NIC ? HINIC_OK :
2660 						HINIC_ERROR);
2661 }
2662 
2663 static int hinic_copy_mempool_init(struct hinic_nic_dev *nic_dev)
2664 {
2665 	nic_dev->cpy_mpool = rte_mempool_lookup(nic_dev->proc_dev_name);
2666 	if (nic_dev->cpy_mpool == NULL) {
2667 		nic_dev->cpy_mpool =
2668 		rte_pktmbuf_pool_create(nic_dev->proc_dev_name,
2669 					HINIC_COPY_MEMPOOL_DEPTH,
2670 					0, 0,
2671 					HINIC_COPY_MBUF_SIZE,
2672 					rte_socket_id());
2673 		if (!nic_dev->cpy_mpool) {
2674 			PMD_DRV_LOG(ERR, "Create copy mempool failed, errno: %d, dev_name: %s",
2675 				    rte_errno, nic_dev->proc_dev_name);
2676 			return -ENOMEM;
2677 		}
2678 	}
2679 
2680 	return 0;
2681 }
2682 
2683 static void hinic_copy_mempool_uninit(struct hinic_nic_dev *nic_dev)
2684 {
2685 	if (nic_dev->cpy_mpool != NULL)
2686 		rte_mempool_free(nic_dev->cpy_mpool);
2687 }
2688 
2689 static int hinic_init_sw_rxtxqs(struct hinic_nic_dev *nic_dev)
2690 {
2691 	u32 txq_size;
2692 	u32 rxq_size;
2693 
2694 	/* allocate software txq array */
2695 	txq_size = nic_dev->nic_cap.max_sqs * sizeof(*nic_dev->txqs);
2696 	nic_dev->txqs = kzalloc_aligned(txq_size, GFP_KERNEL);
2697 	if (!nic_dev->txqs) {
2698 		PMD_DRV_LOG(ERR, "Allocate txqs failed");
2699 		return -ENOMEM;
2700 	}
2701 
2702 	/* allocate software rxq array */
2703 	rxq_size = nic_dev->nic_cap.max_rqs * sizeof(*nic_dev->rxqs);
2704 	nic_dev->rxqs = kzalloc_aligned(rxq_size, GFP_KERNEL);
2705 	if (!nic_dev->rxqs) {
2706 		/* free txqs */
2707 		kfree(nic_dev->txqs);
2708 		nic_dev->txqs = NULL;
2709 
2710 		PMD_DRV_LOG(ERR, "Allocate rxqs failed");
2711 		return -ENOMEM;
2712 	}
2713 
2714 	return HINIC_OK;
2715 }
2716 
2717 static void hinic_deinit_sw_rxtxqs(struct hinic_nic_dev *nic_dev)
2718 {
2719 	kfree(nic_dev->txqs);
2720 	nic_dev->txqs = NULL;
2721 
2722 	kfree(nic_dev->rxqs);
2723 	nic_dev->rxqs = NULL;
2724 }
2725 
2726 static int hinic_nic_dev_create(struct rte_eth_dev *eth_dev)
2727 {
2728 	struct hinic_nic_dev *nic_dev =
2729 				HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
2730 	int rc;
2731 
2732 	nic_dev->hwdev = rte_zmalloc("hinic_hwdev", sizeof(*nic_dev->hwdev),
2733 				     RTE_CACHE_LINE_SIZE);
2734 	if (!nic_dev->hwdev) {
2735 		PMD_DRV_LOG(ERR, "Allocate hinic hwdev memory failed, dev_name: %s",
2736 			    eth_dev->data->name);
2737 		return -ENOMEM;
2738 	}
2739 	nic_dev->hwdev->pcidev_hdl = RTE_ETH_DEV_TO_PCI(eth_dev);
2740 
2741 	/* init osdep */
2742 	rc = hinic_osdep_init(nic_dev->hwdev);
2743 	if (rc) {
2744 		PMD_DRV_LOG(ERR, "Initialize os_dep failed, dev_name: %s",
2745 			    eth_dev->data->name);
2746 		goto init_osdep_fail;
2747 	}
2748 
2749 	/* init_hwif */
2750 	rc = hinic_hwif_res_init(nic_dev->hwdev);
2751 	if (rc) {
2752 		PMD_DRV_LOG(ERR, "Initialize hwif failed, dev_name: %s",
2753 			    eth_dev->data->name);
2754 		goto init_hwif_fail;
2755 	}
2756 
2757 	/* init_cfg_mgmt */
2758 	rc = init_cfg_mgmt(nic_dev->hwdev);
2759 	if (rc) {
2760 		PMD_DRV_LOG(ERR, "Initialize cfg_mgmt failed, dev_name: %s",
2761 			    eth_dev->data->name);
2762 		goto init_cfgmgnt_fail;
2763 	}
2764 
2765 	/* init_aeqs */
2766 	rc = hinic_comm_aeqs_init(nic_dev->hwdev);
2767 	if (rc) {
2768 		PMD_DRV_LOG(ERR, "Initialize aeqs failed, dev_name: %s",
2769 			    eth_dev->data->name);
2770 		goto init_aeqs_fail;
2771 	}
2772 
2773 	/* init_pf_to_mgmt */
2774 	rc = hinic_comm_pf_to_mgmt_init(nic_dev->hwdev);
2775 	if (rc) {
2776 		PMD_DRV_LOG(ERR, "Initialize pf_to_mgmt failed, dev_name: %s",
2777 			    eth_dev->data->name);
2778 		goto init_pf_to_mgmt_fail;
2779 	}
2780 
2781 	/* init mailbox */
2782 	rc = hinic_comm_func_to_func_init(nic_dev->hwdev);
2783 	if (rc) {
2784 		PMD_DRV_LOG(ERR, "Initialize func_to_func failed, dev_name: %s",
2785 			    eth_dev->data->name);
2786 		goto init_func_to_func_fail;
2787 	}
2788 
2789 	rc = hinic_card_workmode_check(nic_dev);
2790 	if (rc) {
2791 		PMD_DRV_LOG(ERR, "Check card workmode failed, dev_name: %s",
2792 			    eth_dev->data->name);
2793 		goto workmode_check_fail;
2794 	}
2795 
2796 	/* do l2nic reset to make chip clear */
2797 	rc = hinic_l2nic_reset(nic_dev->hwdev);
2798 	if (rc) {
2799 		PMD_DRV_LOG(ERR, "Do l2nic reset failed, dev_name: %s",
2800 			    eth_dev->data->name);
2801 		goto l2nic_reset_fail;
2802 	}
2803 
2804 	/* init dma and aeq msix attribute table */
2805 	(void)hinic_init_attr_table(nic_dev->hwdev);
2806 
2807 	/* init_cmdqs */
2808 	rc = hinic_comm_cmdqs_init(nic_dev->hwdev);
2809 	if (rc) {
2810 		PMD_DRV_LOG(ERR, "Initialize cmdq failed, dev_name: %s",
2811 			    eth_dev->data->name);
2812 		goto init_cmdq_fail;
2813 	}
2814 
2815 	/* set hardware state active */
2816 	rc = hinic_activate_hwdev_state(nic_dev->hwdev);
2817 	if (rc) {
2818 		PMD_DRV_LOG(ERR, "Initialize resources state failed, dev_name: %s",
2819 			    eth_dev->data->name);
2820 		goto init_resources_state_fail;
2821 	}
2822 
2823 	/* init_capability */
2824 	rc = hinic_init_capability(nic_dev->hwdev);
2825 	if (rc) {
2826 		PMD_DRV_LOG(ERR, "Initialize capability failed, dev_name: %s",
2827 			    eth_dev->data->name);
2828 		goto init_cap_fail;
2829 	}
2830 
2831 	/* get nic capability */
2832 	if (!hinic_support_nic(nic_dev->hwdev, &nic_dev->nic_cap)) {
2833 		PMD_DRV_LOG(ERR, "Hw doesn't support nic, dev_name: %s",
2834 			    eth_dev->data->name);
2835 		rc = -EINVAL;
2836 		goto nic_check_fail;
2837 	}
2838 
2839 	/* init root cla and function table */
2840 	rc = hinic_init_nicio(nic_dev->hwdev);
2841 	if (rc) {
2842 		PMD_DRV_LOG(ERR, "Initialize nic_io failed, dev_name: %s",
2843 			    eth_dev->data->name);
2844 		goto init_nicio_fail;
2845 	}
2846 
2847 	/* init_software_txrxq */
2848 	rc = hinic_init_sw_rxtxqs(nic_dev);
2849 	if (rc) {
2850 		PMD_DRV_LOG(ERR, "Initialize sw_rxtxqs failed, dev_name: %s",
2851 			    eth_dev->data->name);
2852 		goto init_sw_rxtxqs_fail;
2853 	}
2854 
2855 	rc = hinic_copy_mempool_init(nic_dev);
2856 	if (rc) {
2857 		PMD_DRV_LOG(ERR, "Create copy mempool failed, dev_name: %s",
2858 			 eth_dev->data->name);
2859 		goto init_mpool_fail;
2860 	}
2861 
2862 	/* set hardware feature to default status */
2863 	rc = hinic_set_default_hw_feature(nic_dev);
2864 	if (rc) {
2865 		PMD_DRV_LOG(ERR, "Initialize hardware default features failed, dev_name: %s",
2866 			    eth_dev->data->name);
2867 		goto set_default_hw_feature_fail;
2868 	}
2869 
2870 	return 0;
2871 
2872 set_default_hw_feature_fail:
2873 	hinic_copy_mempool_uninit(nic_dev);
2874 
2875 init_mpool_fail:
2876 	hinic_deinit_sw_rxtxqs(nic_dev);
2877 
2878 init_sw_rxtxqs_fail:
2879 	hinic_deinit_nicio(nic_dev->hwdev);
2880 
2881 nic_check_fail:
2882 init_nicio_fail:
2883 init_cap_fail:
2884 	hinic_deactivate_hwdev_state(nic_dev->hwdev);
2885 
2886 init_resources_state_fail:
2887 	hinic_comm_cmdqs_free(nic_dev->hwdev);
2888 
2889 init_cmdq_fail:
2890 l2nic_reset_fail:
2891 workmode_check_fail:
2892 	hinic_comm_func_to_func_free(nic_dev->hwdev);
2893 
2894 init_func_to_func_fail:
2895 	hinic_comm_pf_to_mgmt_free(nic_dev->hwdev);
2896 
2897 init_pf_to_mgmt_fail:
2898 	hinic_comm_aeqs_free(nic_dev->hwdev);
2899 
2900 init_aeqs_fail:
2901 	free_cfg_mgmt(nic_dev->hwdev);
2902 
2903 init_cfgmgnt_fail:
2904 	hinic_hwif_res_free(nic_dev->hwdev);
2905 
2906 init_hwif_fail:
2907 	hinic_osdep_deinit(nic_dev->hwdev);
2908 
2909 init_osdep_fail:
2910 	rte_free(nic_dev->hwdev);
2911 	nic_dev->hwdev = NULL;
2912 
2913 	return rc;
2914 }
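
/*
 * hinic_nic_dev_destroy() below releases the resources in the reverse order
 * of hinic_nic_dev_create(), mirroring the error-unwind labels above.
 */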
2915 
2916 static void hinic_nic_dev_destroy(struct rte_eth_dev *eth_dev)
2917 {
2918 	struct hinic_nic_dev *nic_dev =
2919 			HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
2920 
2921 	(void)hinic_set_link_status_follow(nic_dev->hwdev,
2922 					   HINIC_LINK_FOLLOW_DEFAULT);
2923 	hinic_copy_mempool_uninit(nic_dev);
2924 	hinic_deinit_sw_rxtxqs(nic_dev);
2925 	hinic_deinit_nicio(nic_dev->hwdev);
2926 	hinic_deactivate_hwdev_state(nic_dev->hwdev);
2927 	hinic_comm_cmdqs_free(nic_dev->hwdev);
2928 	hinic_comm_func_to_func_free(nic_dev->hwdev);
2929 	hinic_comm_pf_to_mgmt_free(nic_dev->hwdev);
2930 	hinic_comm_aeqs_free(nic_dev->hwdev);
2931 	free_cfg_mgmt(nic_dev->hwdev);
2932 	hinic_hwif_res_free(nic_dev->hwdev);
2933 	hinic_osdep_deinit(nic_dev->hwdev);
2934 	rte_free(nic_dev->hwdev);
2935 	nic_dev->hwdev = NULL;
2936 }
2937 
2938 /**
2939  * DPDK callback to close the device.
2940  *
2941  * @param dev
2942  *   Pointer to Ethernet device structure.
2943  */
2944 static int hinic_dev_close(struct rte_eth_dev *dev)
2945 {
2946 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2947 	int ret;
2948 
2949 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2950 		return 0;
2951 
2952 	if (rte_bit_relaxed_test_and_set32(HINIC_DEV_CLOSE,
2953 					   &nic_dev->dev_status)) {
2954 		PMD_DRV_LOG(WARNING, "Device %s already closed",
2955 			    dev->data->name);
2956 		return 0;
2957 	}
2958 
2959 	/* stop device first */
2960 	ret = hinic_dev_stop(dev);
2961 
2962 	/* rx_cqe, rx_info */
2963 	hinic_free_all_rx_resources(dev);
2964 
2965 	/* tx_info */
2966 	hinic_free_all_tx_resources(dev);
2967 
2968 	/* free wq, pi_dma_addr */
2969 	hinic_free_all_rq(nic_dev);
2970 
2971 	/* free wq, db_addr */
2972 	hinic_free_all_sq(nic_dev);
2973 
2974 	/* deinit mac vlan tbl */
2975 	hinic_deinit_mac_addr(dev);
2976 	hinic_remove_all_vlanid(dev);
2977 
2978 	/* disable hardware and uio interrupt */
2979 	hinic_disable_interrupt(dev);
2980 
2981 	/* destroy rx mode mutex */
2982 	hinic_mutex_destroy(&nic_dev->rx_mode_mutex);
2983 
2984 	/* deinit nic hardware device */
2985 	hinic_nic_dev_destroy(dev);
2986 
2987 	return ret;
2988 }
2989 
2990 static const struct eth_dev_ops hinic_pmd_ops = {
2991 	.dev_configure                 = hinic_dev_configure,
2992 	.dev_infos_get                 = hinic_dev_infos_get,
2993 	.fw_version_get                = hinic_fw_version_get,
2994 	.rx_queue_setup                = hinic_rx_queue_setup,
2995 	.tx_queue_setup                = hinic_tx_queue_setup,
2996 	.dev_start                     = hinic_dev_start,
2997 	.dev_set_link_up               = hinic_dev_set_link_up,
2998 	.dev_set_link_down             = hinic_dev_set_link_down,
2999 	.link_update                   = hinic_link_update,
3000 	.rx_queue_release              = hinic_rx_queue_release,
3001 	.tx_queue_release              = hinic_tx_queue_release,
3002 	.dev_stop                      = hinic_dev_stop,
3003 	.dev_close                     = hinic_dev_close,
3004 	.mtu_set                       = hinic_dev_set_mtu,
3005 	.vlan_filter_set               = hinic_vlan_filter_set,
3006 	.vlan_offload_set              = hinic_vlan_offload_set,
3007 	.allmulticast_enable           = hinic_dev_allmulticast_enable,
3008 	.allmulticast_disable          = hinic_dev_allmulticast_disable,
3009 	.promiscuous_enable            = hinic_dev_promiscuous_enable,
3010 	.promiscuous_disable           = hinic_dev_promiscuous_disable,
3011 	.flow_ctrl_get                 = hinic_flow_ctrl_get,
3012 	.flow_ctrl_set                 = hinic_flow_ctrl_set,
3013 	.rss_hash_update               = hinic_rss_hash_update,
3014 	.rss_hash_conf_get             = hinic_rss_conf_get,
3015 	.reta_update                   = hinic_rss_indirtbl_update,
3016 	.reta_query                    = hinic_rss_indirtbl_query,
3017 	.stats_get                     = hinic_dev_stats_get,
3018 	.stats_reset                   = hinic_dev_stats_reset,
3019 	.xstats_get                    = hinic_dev_xstats_get,
3020 	.xstats_reset                  = hinic_dev_xstats_reset,
3021 	.xstats_get_names              = hinic_dev_xstats_get_names,
3022 	.rxq_info_get                  = hinic_rxq_info_get,
3023 	.txq_info_get                  = hinic_txq_info_get,
3024 	.mac_addr_set                  = hinic_set_mac_addr,
3025 	.mac_addr_remove               = hinic_mac_addr_remove,
3026 	.mac_addr_add                  = hinic_mac_addr_add,
3027 	.set_mc_addr_list              = hinic_set_mc_addr_list,
3028 	.flow_ops_get                  = hinic_dev_flow_ops_get,
3029 };
3030 
3031 static const struct eth_dev_ops hinic_pmd_vf_ops = {
3032 	.dev_configure                 = hinic_dev_configure,
3033 	.dev_infos_get                 = hinic_dev_infos_get,
3034 	.fw_version_get                = hinic_fw_version_get,
3035 	.rx_queue_setup                = hinic_rx_queue_setup,
3036 	.tx_queue_setup                = hinic_tx_queue_setup,
3037 	.dev_start                     = hinic_dev_start,
3038 	.link_update                   = hinic_link_update,
3039 	.rx_queue_release              = hinic_rx_queue_release,
3040 	.tx_queue_release              = hinic_tx_queue_release,
3041 	.dev_stop                      = hinic_dev_stop,
3042 	.dev_close                     = hinic_dev_close,
3043 	.mtu_set                       = hinic_dev_set_mtu,
3044 	.vlan_filter_set               = hinic_vlan_filter_set,
3045 	.vlan_offload_set              = hinic_vlan_offload_set,
3046 	.allmulticast_enable           = hinic_dev_allmulticast_enable,
3047 	.allmulticast_disable          = hinic_dev_allmulticast_disable,
3048 	.rss_hash_update               = hinic_rss_hash_update,
3049 	.rss_hash_conf_get             = hinic_rss_conf_get,
3050 	.reta_update                   = hinic_rss_indirtbl_update,
3051 	.reta_query                    = hinic_rss_indirtbl_query,
3052 	.stats_get                     = hinic_dev_stats_get,
3053 	.stats_reset                   = hinic_dev_stats_reset,
3054 	.xstats_get                    = hinic_dev_xstats_get,
3055 	.xstats_reset                  = hinic_dev_xstats_reset,
3056 	.xstats_get_names              = hinic_dev_xstats_get_names,
3057 	.rxq_info_get                  = hinic_rxq_info_get,
3058 	.txq_info_get                  = hinic_txq_info_get,
3059 	.mac_addr_set                  = hinic_set_mac_addr,
3060 	.mac_addr_remove               = hinic_mac_addr_remove,
3061 	.mac_addr_add                  = hinic_mac_addr_add,
3062 	.set_mc_addr_list              = hinic_set_mc_addr_list,
3063 	.flow_ops_get                  = hinic_dev_flow_ops_get,
3064 };
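
/*
 * Compared with hinic_pmd_ops, the VF table above intentionally omits
 * dev_set_link_up/down, promiscuous_enable/disable and flow_ctrl_get/set,
 * which operate at physical-port level and are only exposed on the PF.
 */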
3065 
3066 static const struct eth_dev_ops hinic_dev_sec_ops = {
3067 	.dev_infos_get                 = hinic_dev_infos_get,
3068 };
3069 
3070 static int hinic_func_init(struct rte_eth_dev *eth_dev)
3071 {
3072 	struct rte_pci_device *pci_dev;
3073 	struct rte_ether_addr *eth_addr;
3074 	struct hinic_nic_dev *nic_dev;
3075 	struct hinic_filter_info *filter_info;
3076 	struct hinic_tcam_info *tcam_info;
3077 	u32 mac_size;
3078 	int rc;
3079 
3080 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3081 
3082 	/* EAL is SECONDARY and eth_dev is already created */
3083 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3084 		eth_dev->dev_ops = &hinic_dev_sec_ops;
3085 		PMD_DRV_LOG(INFO, "Initialize %s in secondary process",
3086 			    eth_dev->data->name);
3087 
3088 		return 0;
3089 	}
3090 
3091 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
3092 
3093 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
3094 	memset(nic_dev, 0, sizeof(*nic_dev));
3095 
3096 	snprintf(nic_dev->proc_dev_name,
3097 		 sizeof(nic_dev->proc_dev_name),
3098 		 "hinic-%.4x:%.2x:%.2x.%x",
3099 		 pci_dev->addr.domain, pci_dev->addr.bus,
3100 		 pci_dev->addr.devid, pci_dev->addr.function);
3101 
3102 	/* alloc mac_addrs */
3103 	mac_size = HINIC_MAX_UC_MAC_ADDRS * sizeof(struct rte_ether_addr);
3104 	eth_addr = rte_zmalloc("hinic_mac", mac_size, 0);
3105 	if (!eth_addr) {
3106 		PMD_DRV_LOG(ERR, "Allocate ethernet addresses' memory failed, dev_name: %s",
3107 			    eth_dev->data->name);
3108 		rc = -ENOMEM;
3109 		goto eth_addr_fail;
3110 	}
3111 	eth_dev->data->mac_addrs = eth_addr;
3112 
3113 	mac_size = HINIC_MAX_MC_MAC_ADDRS * sizeof(struct rte_ether_addr);
3114 	nic_dev->mc_list = rte_zmalloc("hinic_mc", mac_size, 0);
3115 	if (!nic_dev->mc_list) {
3116 		PMD_DRV_LOG(ERR, "Allocate mcast addresses memory failed, dev_name: %s",
3117 			    eth_dev->data->name);
3118 		rc = -ENOMEM;
3119 		goto mc_addr_fail;
3120 	}
3121 
3122 	/* create hardware nic_device */
3123 	rc = hinic_nic_dev_create(eth_dev);
3124 	if (rc) {
3125 		PMD_DRV_LOG(ERR, "Create nic device failed, dev_name: %s",
3126 			    eth_dev->data->name);
3127 		goto create_nic_dev_fail;
3128 	}
3129 
3130 	if (HINIC_IS_VF(nic_dev->hwdev))
3131 		eth_dev->dev_ops = &hinic_pmd_vf_ops;
3132 	else
3133 		eth_dev->dev_ops = &hinic_pmd_ops;
3134 
3135 	rc = hinic_init_mac_addr(eth_dev);
3136 	if (rc) {
3137 		PMD_DRV_LOG(ERR, "Initialize mac table failed, dev_name: %s",
3138 			    eth_dev->data->name);
3139 		goto init_mac_fail;
3140 	}
3141 
3142 	/* register callback func to eal lib */
3143 	rc = rte_intr_callback_register(&pci_dev->intr_handle,
3144 					hinic_dev_interrupt_handler,
3145 					(void *)eth_dev);
3146 	if (rc) {
3147 		PMD_DRV_LOG(ERR, "Register rte interrupt callback failed, dev_name: %s",
3148 			    eth_dev->data->name);
3149 		goto reg_intr_cb_fail;
3150 	}
3151 
3152 	/* enable uio/vfio intr/eventfd mapping */
3153 	rc = rte_intr_enable(&pci_dev->intr_handle);
3154 	if (rc) {
3155 		PMD_DRV_LOG(ERR, "Enable rte interrupt failed, dev_name: %s",
3156 			    eth_dev->data->name);
3157 		goto enable_intr_fail;
3158 	}
3159 	rte_bit_relaxed_set32(HINIC_DEV_INTR_EN, &nic_dev->dev_status);
3160 
3161 	hinic_mutex_init(&nic_dev->rx_mode_mutex, NULL);
3162 
3163 	/* initialize filter info */
3164 	filter_info = &nic_dev->filter;
3165 	tcam_info = &nic_dev->tcam;
3166 	memset(filter_info, 0, sizeof(struct hinic_filter_info));
3167 	memset(tcam_info, 0, sizeof(struct hinic_tcam_info));
3168 	/* initialize 5tuple filter list */
3169 	TAILQ_INIT(&filter_info->fivetuple_list);
3170 	TAILQ_INIT(&tcam_info->tcam_list);
3171 	TAILQ_INIT(&nic_dev->filter_ntuple_list);
3172 	TAILQ_INIT(&nic_dev->filter_ethertype_list);
3173 	TAILQ_INIT(&nic_dev->filter_fdir_rule_list);
3174 	TAILQ_INIT(&nic_dev->hinic_flow_list);
3175 
3176 	rte_bit_relaxed_set32(HINIC_DEV_INIT, &nic_dev->dev_status);
3177 	PMD_DRV_LOG(INFO, "Initialize %s in primary successfully",
3178 		    eth_dev->data->name);
3179 
3180 	return 0;
3181 
3182 enable_intr_fail:
3183 	(void)rte_intr_callback_unregister(&pci_dev->intr_handle,
3184 					   hinic_dev_interrupt_handler,
3185 					   (void *)eth_dev);
3186 
3187 reg_intr_cb_fail:
3188 	hinic_deinit_mac_addr(eth_dev);
3189 
3190 init_mac_fail:
3191 	eth_dev->dev_ops = NULL;
3192 	hinic_nic_dev_destroy(eth_dev);
3193 
3194 create_nic_dev_fail:
3195 	rte_free(nic_dev->mc_list);
3196 	nic_dev->mc_list = NULL;
3197 
3198 mc_addr_fail:
3199 	rte_free(eth_addr);
3200 	eth_dev->data->mac_addrs = NULL;
3201 
3202 eth_addr_fail:
3203 	PMD_DRV_LOG(ERR, "Initialize %s in primary failed",
3204 		    eth_dev->data->name);
3205 	return rc;
3206 }
3207 
3208 static int hinic_dev_init(struct rte_eth_dev *eth_dev)
3209 {
3210 	struct rte_pci_device *pci_dev;
3211 
3212 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3213 
3214 	PMD_DRV_LOG(INFO, "Initializing pf hinic-%.4x:%.2x:%.2x.%x in %s process",
3215 		    pci_dev->addr.domain, pci_dev->addr.bus,
3216 		    pci_dev->addr.devid, pci_dev->addr.function,
3217 		    (rte_eal_process_type() == RTE_PROC_PRIMARY) ?
3218 		    "primary" : "secondary");
3219 
3220 	/* rte_eth_dev rx_burst and tx_burst */
3221 	eth_dev->rx_pkt_burst = hinic_recv_pkts;
3222 	eth_dev->tx_pkt_burst = hinic_xmit_pkts;
3223 
3224 	return hinic_func_init(eth_dev);
3225 }
3226 
3227 static int hinic_dev_uninit(struct rte_eth_dev *dev)
3228 {
3229 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3230 		return 0;
3231 
3232 	hinic_dev_close(dev);
3233 
3234 	return HINIC_OK;
3235 }
3236 
3237 static struct rte_pci_id pci_id_hinic_map[] = {
3238 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_PRD) },
3239 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_25GE) },
3240 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_100GE) },
3241 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF) },
3242 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF_HV) },
3243 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_DUAL_25GE) },
3244 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_100GE) },
3245 	{.vendor_id = 0},
3246 };
3247 
3248 static int hinic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3249 			   struct rte_pci_device *pci_dev)
3250 {
3251 	return rte_eth_dev_pci_generic_probe(pci_dev,
3252 		sizeof(struct hinic_nic_dev), hinic_dev_init);
3253 }
3254 
3255 static int hinic_pci_remove(struct rte_pci_device *pci_dev)
3256 {
3257 	return rte_eth_dev_pci_generic_remove(pci_dev, hinic_dev_uninit);
3258 }
3259 
3260 static struct rte_pci_driver rte_hinic_pmd = {
3261 	.id_table = pci_id_hinic_map,
3262 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3263 	.probe = hinic_pci_probe,
3264 	.remove = hinic_pci_remove,
3265 };
3266 
3267 RTE_PMD_REGISTER_PCI(net_hinic, rte_hinic_pmd);
3268 RTE_PMD_REGISTER_PCI_TABLE(net_hinic, pci_id_hinic_map);
3269 RTE_LOG_REGISTER(hinic_logtype, pmd.net.hinic, INFO);
3270