xref: /dpdk/drivers/net/hinic/hinic_pmd_ethdev.c (revision d56ec3dcad056c47cef4e837d5191d04c936d87e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Huawei Technologies Co., Ltd
3  */
4 
5 #include <rte_pci.h>
6 #include <rte_bus_pci.h>
7 #include <rte_ethdev_pci.h>
8 #include <rte_mbuf.h>
9 #include <rte_malloc.h>
10 #include <rte_memcpy.h>
11 #include <rte_mempool.h>
12 #include <rte_errno.h>
13 #include <rte_ether.h>
14 
15 #include "base/hinic_compat.h"
16 #include "base/hinic_pmd_hwdev.h"
17 #include "base/hinic_pmd_hwif.h"
18 #include "base/hinic_pmd_wq.h"
19 #include "base/hinic_pmd_cfg.h"
20 #include "base/hinic_pmd_mgmt.h"
21 #include "base/hinic_pmd_cmdq.h"
22 #include "base/hinic_pmd_niccfg.h"
23 #include "base/hinic_pmd_nicio.h"
24 #include "base/hinic_pmd_mbox.h"
25 #include "hinic_pmd_ethdev.h"
26 #include "hinic_pmd_tx.h"
27 #include "hinic_pmd_rx.h"
28 
29 /* Vendor ID used by Huawei devices */
30 #define HINIC_HUAWEI_VENDOR_ID		0x19E5
31 
32 /* Hinic devices */
33 #define HINIC_DEV_ID_PRD		0x1822
34 #define HINIC_DEV_ID_VF			0x375E
35 #define HINIC_DEV_ID_VF_HV		0x379E
36 
37 /* Mezz card for Blade Server */
38 #define HINIC_DEV_ID_MEZZ_25GE		0x0210
39 #define HINIC_DEV_ID_MEZZ_40GE		0x020D
40 #define HINIC_DEV_ID_MEZZ_100GE		0x0205
41 
42 /* 2*25G and 2*100G card */
43 #define HINIC_DEV_ID_1822_DUAL_25GE	0x0206
44 #define HINIC_DEV_ID_1822_100GE		0x0200
45 
46 #define HINIC_SERVICE_MODE_NIC		2
47 
48 #define HINIC_INTR_CB_UNREG_MAX_RETRIES	10
49 
50 #define DEFAULT_BASE_COS		4
51 #define NR_MAX_COS			8
52 
53 #define HINIC_MIN_RX_BUF_SIZE		1024
54 #define HINIC_MAX_UC_MAC_ADDRS		128
55 #define HINIC_MAX_MC_MAC_ADDRS		2048
56 
57 #define HINIC_DEFAULT_BURST_SIZE	32
58 #define HINIC_DEFAULT_NB_QUEUES		1
59 #define HINIC_DEFAULT_RING_SIZE		1024
60 
61 /*
62  * vlan_id is a 12 bit number.
63  * The VFTA array is actually a 4096-bit array made of 128 32-bit elements.
64  * 2^5 = 32: the lower 5 bits of vlan_id select the bit within a 32-bit
65  * element, and the upper 7 bits select the VFTA array index.
66  */
67 #define HINIC_VFTA_BIT(vlan_id)    (1 << ((vlan_id) & 0x1F))
68 #define HINIC_VFTA_IDX(vlan_id)    ((vlan_id) >> 5)
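
/*
 * For example, for vlan_id 1000: HINIC_VFTA_IDX(1000) = 1000 >> 5 = 31 and
 * HINIC_VFTA_BIT(1000) = 1 << (1000 & 0x1F) = 1 << 8, so the filter state
 * of VLAN 1000 is kept in bit 8 of vfta[31].
 */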
69 
70 #define HINIC_VLAN_FILTER_EN		(1U << 0)
71 
72 #define HINIC_MTU_TO_PKTLEN(mtu)	\
73 	((mtu) + ETH_HLEN + ETH_CRC_LEN)
74 
75 #define HINIC_PKTLEN_TO_MTU(pktlen)	\
76 	((pktlen) - (ETH_HLEN + ETH_CRC_LEN))
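
/*
 * Assuming the usual ETH_HLEN of 14 and ETH_CRC_LEN of 4 bytes from the
 * compat header, HINIC_MTU_TO_PKTLEN(1500) = 1500 + 14 + 4 = 1518 and
 * HINIC_PKTLEN_TO_MTU(1518) gives 1500 back.
 */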
77 
78 /* lro wqe number limit for one packet */
79 #define HINIC_LRO_WQE_NUM_DEFAULT	8
80 
81 /* Driver-specific log message type */
82 int hinic_logtype;
83 
84 struct hinic_xstats_name_off {
85 	char name[RTE_ETH_XSTATS_NAME_SIZE];
86 	u32  offset;
87 };
88 
89 #define HINIC_FUNC_STAT(_stat_item) {	\
90 	.name = #_stat_item, \
91 	.offset = offsetof(struct hinic_vport_stats, _stat_item) \
92 }
93 
94 #define HINIC_PORT_STAT(_stat_item) { \
95 	.name = #_stat_item, \
96 	.offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
97 }
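
/*
 * As an illustration, HINIC_FUNC_STAT(tx_discard_vport) expands to
 * { .name = "tx_discard_vport",
 *   .offset = offsetof(struct hinic_vport_stats, tx_discard_vport) },
 * so each extended statistic is described by its name plus the byte offset
 * of the matching counter in the stats structure.
 */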
98 
99 static const struct hinic_xstats_name_off hinic_vport_stats_strings[] = {
100 	HINIC_FUNC_STAT(tx_unicast_pkts_vport),
101 	HINIC_FUNC_STAT(tx_unicast_bytes_vport),
102 	HINIC_FUNC_STAT(tx_multicast_pkts_vport),
103 	HINIC_FUNC_STAT(tx_multicast_bytes_vport),
104 	HINIC_FUNC_STAT(tx_broadcast_pkts_vport),
105 	HINIC_FUNC_STAT(tx_broadcast_bytes_vport),
106 
107 	HINIC_FUNC_STAT(rx_unicast_pkts_vport),
108 	HINIC_FUNC_STAT(rx_unicast_bytes_vport),
109 	HINIC_FUNC_STAT(rx_multicast_pkts_vport),
110 	HINIC_FUNC_STAT(rx_multicast_bytes_vport),
111 	HINIC_FUNC_STAT(rx_broadcast_pkts_vport),
112 	HINIC_FUNC_STAT(rx_broadcast_bytes_vport),
113 
114 	HINIC_FUNC_STAT(tx_discard_vport),
115 	HINIC_FUNC_STAT(rx_discard_vport),
116 	HINIC_FUNC_STAT(tx_err_vport),
117 	HINIC_FUNC_STAT(rx_err_vport),
118 };
119 
120 #define HINIC_VPORT_XSTATS_NUM (sizeof(hinic_vport_stats_strings) / \
121 		sizeof(hinic_vport_stats_strings[0]))
122 
123 static const struct hinic_xstats_name_off hinic_phyport_stats_strings[] = {
124 	HINIC_PORT_STAT(mac_rx_total_pkt_num),
125 	HINIC_PORT_STAT(mac_rx_total_oct_num),
126 	HINIC_PORT_STAT(mac_rx_bad_pkt_num),
127 	HINIC_PORT_STAT(mac_rx_bad_oct_num),
128 	HINIC_PORT_STAT(mac_rx_good_pkt_num),
129 	HINIC_PORT_STAT(mac_rx_good_oct_num),
130 	HINIC_PORT_STAT(mac_rx_uni_pkt_num),
131 	HINIC_PORT_STAT(mac_rx_multi_pkt_num),
132 	HINIC_PORT_STAT(mac_rx_broad_pkt_num),
133 	HINIC_PORT_STAT(mac_tx_total_pkt_num),
134 	HINIC_PORT_STAT(mac_tx_total_oct_num),
135 	HINIC_PORT_STAT(mac_tx_bad_pkt_num),
136 	HINIC_PORT_STAT(mac_tx_bad_oct_num),
137 	HINIC_PORT_STAT(mac_tx_good_pkt_num),
138 	HINIC_PORT_STAT(mac_tx_good_oct_num),
139 	HINIC_PORT_STAT(mac_tx_uni_pkt_num),
140 	HINIC_PORT_STAT(mac_tx_multi_pkt_num),
141 	HINIC_PORT_STAT(mac_tx_broad_pkt_num),
142 	HINIC_PORT_STAT(mac_rx_fragment_pkt_num),
143 	HINIC_PORT_STAT(mac_rx_undersize_pkt_num),
144 	HINIC_PORT_STAT(mac_rx_undermin_pkt_num),
145 	HINIC_PORT_STAT(mac_rx_64_oct_pkt_num),
146 	HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num),
147 	HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num),
148 	HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num),
149 	HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num),
150 	HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num),
151 	HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num),
152 	HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num),
153 	HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num),
154 	HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num),
155 	HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num),
156 	HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num),
157 	HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num),
158 	HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num),
159 	HINIC_PORT_STAT(mac_rx_oversize_pkt_num),
160 	HINIC_PORT_STAT(mac_rx_jabber_pkt_num),
161 	HINIC_PORT_STAT(mac_rx_mac_pause_num),
162 	HINIC_PORT_STAT(mac_rx_pfc_pkt_num),
163 	HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num),
164 	HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num),
165 	HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num),
166 	HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num),
167 	HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num),
168 	HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num),
169 	HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num),
170 	HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num),
171 	HINIC_PORT_STAT(mac_rx_mac_control_pkt_num),
172 	HINIC_PORT_STAT(mac_rx_sym_err_pkt_num),
173 	HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num),
174 	HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num),
175 	HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num),
176 	HINIC_PORT_STAT(mac_tx_fragment_pkt_num),
177 	HINIC_PORT_STAT(mac_tx_undersize_pkt_num),
178 	HINIC_PORT_STAT(mac_tx_undermin_pkt_num),
179 	HINIC_PORT_STAT(mac_tx_64_oct_pkt_num),
180 	HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num),
181 	HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num),
182 	HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num),
183 	HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num),
184 	HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num),
185 	HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num),
186 	HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num),
187 	HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num),
188 	HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num),
189 	HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num),
190 	HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num),
191 	HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num),
192 	HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num),
193 	HINIC_PORT_STAT(mac_tx_oversize_pkt_num),
194 	HINIC_PORT_STAT(mac_trans_jabber_pkt_num),
195 	HINIC_PORT_STAT(mac_tx_mac_pause_num),
196 	HINIC_PORT_STAT(mac_tx_pfc_pkt_num),
197 	HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num),
198 	HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num),
199 	HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num),
200 	HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num),
201 	HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num),
202 	HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num),
203 	HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num),
204 	HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num),
205 	HINIC_PORT_STAT(mac_tx_mac_control_pkt_num),
206 	HINIC_PORT_STAT(mac_tx_err_all_pkt_num),
207 	HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num),
208 	HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num),
209 };
210 
211 #define HINIC_PHYPORT_XSTATS_NUM (sizeof(hinic_phyport_stats_strings) / \
212 		sizeof(hinic_phyport_stats_strings[0]))
213 
214 static const struct hinic_xstats_name_off hinic_rxq_stats_strings[] = {
215 	{"rx_nombuf", offsetof(struct hinic_rxq_stats, rx_nombuf)},
216 	{"burst_pkt", offsetof(struct hinic_rxq_stats, burst_pkts)},
217 };
218 
219 #define HINIC_RXQ_XSTATS_NUM (sizeof(hinic_rxq_stats_strings) / \
220 		sizeof(hinic_rxq_stats_strings[0]))
221 
222 static const struct hinic_xstats_name_off hinic_txq_stats_strings[] = {
223 	{"tx_busy", offsetof(struct hinic_txq_stats, tx_busy)},
224 	{"offload_errors", offsetof(struct hinic_txq_stats, off_errs)},
225 	{"copy_pkts", offsetof(struct hinic_txq_stats, cpy_pkts)},
226 	{"rl_drop", offsetof(struct hinic_txq_stats, rl_drop)},
227 	{"burst_pkts", offsetof(struct hinic_txq_stats, burst_pkts)},
228 };
229 
230 #define HINIC_TXQ_XSTATS_NUM (sizeof(hinic_txq_stats_strings) / \
231 		sizeof(hinic_txq_stats_strings[0]))
232 
233 static int hinic_xstats_calc_num(struct hinic_nic_dev *nic_dev)
234 {
235 	if (HINIC_IS_VF(nic_dev->hwdev)) {
236 		return (HINIC_VPORT_XSTATS_NUM +
237 			HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq +
238 			HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq);
239 	} else {
240 		return (HINIC_VPORT_XSTATS_NUM +
241 			HINIC_PHYPORT_XSTATS_NUM +
242 			HINIC_RXQ_XSTATS_NUM * nic_dev->num_rq +
243 			HINIC_TXQ_XSTATS_NUM * nic_dev->num_sq);
244 	}
245 }
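
/*
 * Rough sizing example: the tables above give HINIC_VPORT_XSTATS_NUM = 16,
 * HINIC_RXQ_XSTATS_NUM = 2 and HINIC_TXQ_XSTATS_NUM = 5 entries, so a VF
 * with 4 RQs and 4 SQs reports 16 + 2 * 4 + 5 * 4 = 44 extended stats,
 * while a PF additionally reports the HINIC_PHYPORT_XSTATS_NUM
 * physical-port counters. This is the array size a caller of
 * rte_eth_xstats_get() should expect from this port.
 */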
246 
247 static const struct rte_eth_desc_lim hinic_rx_desc_lim = {
248 	.nb_max = HINIC_MAX_QUEUE_DEPTH,
249 	.nb_min = HINIC_MIN_QUEUE_DEPTH,
250 	.nb_align = HINIC_RXD_ALIGN,
251 };
252 
253 static const struct rte_eth_desc_lim hinic_tx_desc_lim = {
254 	.nb_max = HINIC_MAX_QUEUE_DEPTH,
255 	.nb_min = HINIC_MIN_QUEUE_DEPTH,
256 	.nb_align = HINIC_TXD_ALIGN,
257 };
258 
259 static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask);
260 
261 /**
262  * Interrupt handler triggered by the NIC for handling
263  * specific events.
264  *
265  * @param param: The address of the (struct rte_eth_dev *) registered before.
266  */
267 static void hinic_dev_interrupt_handler(void *param)
268 {
269 	struct rte_eth_dev *dev = param;
270 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
271 
272 	if (!hinic_test_bit(HINIC_DEV_INTR_EN, &nic_dev->dev_status)) {
273 		PMD_DRV_LOG(WARNING, "Device's interrupt is disabled, ignore interrupt event, dev_name: %s, port_id: %d",
274 			    nic_dev->proc_dev_name, dev->data->port_id);
275 		return;
276 	}
277 
278 	/* aeq0 msg handler */
279 	hinic_dev_handle_aeq_event(nic_dev->hwdev, param);
280 }
281 
282 /**
283  * Ethernet device configuration.
284  *
285  * Prepare the driver for a given number of TX and RX queues, mtu size
286  * and configure RSS.
287  *
288  * @param dev
289  *   Pointer to Ethernet device structure.
290  *
291  * @return
292  *   0 on success, negative error value otherwise.
293  */
294 static int hinic_dev_configure(struct rte_eth_dev *dev)
295 {
296 	struct hinic_nic_dev *nic_dev;
297 	struct hinic_nic_io *nic_io;
298 	int err;
299 
300 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
301 	nic_io = nic_dev->hwdev->nic_io;
302 
303 	nic_dev->num_sq =  dev->data->nb_tx_queues;
304 	nic_dev->num_rq = dev->data->nb_rx_queues;
305 
306 	nic_io->num_sqs =  dev->data->nb_tx_queues;
307 	nic_io->num_rqs = dev->data->nb_rx_queues;
308 
309 	/* number of queue pairs is max(num_sq, num_rq) */
310 	nic_dev->num_qps = (nic_dev->num_sq > nic_dev->num_rq) ?
311 			nic_dev->num_sq : nic_dev->num_rq;
312 	nic_io->num_qps = nic_dev->num_qps;
313 
314 	if (nic_dev->num_qps > nic_io->max_qps) {
315 		PMD_DRV_LOG(ERR,
316 			"Queue number out of range, get queue_num:%d, max_queue_num:%d",
317 			nic_dev->num_qps, nic_io->max_qps);
318 		return -EINVAL;
319 	}
320 
321 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
322 		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
323 
324 	/* frame size (max_rx_pkt_len) must be within 256 ~ 9600 bytes */
325 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len < HINIC_MIN_FRAME_SIZE ||
326 	    dev->data->dev_conf.rxmode.max_rx_pkt_len >
327 	    HINIC_MAX_JUMBO_FRAME_SIZE) {
328 		PMD_DRV_LOG(ERR,
329 			"Max rx pkt len out of range, get max_rx_pkt_len:%d, "
330 			"expect between %d and %d",
331 			dev->data->dev_conf.rxmode.max_rx_pkt_len,
332 			HINIC_MIN_FRAME_SIZE, HINIC_MAX_JUMBO_FRAME_SIZE);
333 		return -EINVAL;
334 	}
335 
336 	nic_dev->mtu_size =
337 		HINIC_PKTLEN_TO_MTU(dev->data->dev_conf.rxmode.max_rx_pkt_len);
338 
339 	/* rss template */
340 	err = hinic_config_mq_mode(dev, TRUE);
341 	if (err) {
342 		PMD_DRV_LOG(ERR, "Config multi-queue failed");
343 		return err;
344 	}
345 
346 	/* init vlan offload */
347 	err = hinic_vlan_offload_set(dev,
348 				ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
349 	if (err) {
350 		PMD_DRV_LOG(ERR, "Initialize vlan filter and strip failed\n");
351 		(void)hinic_config_mq_mode(dev, FALSE);
352 		return err;
353 	}
354 
355 	/* clear fdir filter flag in function table */
356 	hinic_free_fdir_filter(nic_dev);
357 
358 	return HINIC_OK;
359 }
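
/*
 * A minimal application-side sketch that ends up in this callback could
 * look as follows (illustrative only; port_id and nb_queues are
 * placeholders chosen by the application):
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = {
 *			.mq_mode = ETH_MQ_RX_RSS,
 *			.max_rx_pkt_len = 1518,
 *		},
 *	};
 *	ret = rte_eth_dev_configure(port_id, nb_queues, nb_queues, &conf);
 *
 * hinic_dev_configure() then checks the queue counts against
 * nic_io->max_qps and max_rx_pkt_len against the supported frame size
 * range before programming the RSS and VLAN settings.
 */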
360 
361 /**
362  * DPDK callback to create the receive queue.
363  *
364  * @param dev
365  *   Pointer to Ethernet device structure.
366  * @param queue_idx
367  *   RX queue index.
368  * @param nb_desc
369  *   Number of descriptors for receive queue.
370  * @param socket_id
371  *   NUMA socket on which memory must be allocated.
372  * @param rx_conf
373  *   Thresholds parameters; only rx_free_thresh is used.
374  * @param mp
375  *   Memory pool for buffer allocations.
376  *
377  * @return
378  *   0 on success, negative error value otherwise.
379  */
380 static int hinic_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
381 			 uint16_t nb_desc, unsigned int socket_id,
382 			 __rte_unused const struct rte_eth_rxconf *rx_conf,
383 			 struct rte_mempool *mp)
384 {
385 	int rc;
386 	struct hinic_nic_dev *nic_dev;
387 	struct hinic_hwdev *hwdev;
388 	struct hinic_rxq *rxq;
389 	u16 rq_depth, rx_free_thresh;
390 	u32 buf_size;
391 
392 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
393 	hwdev = nic_dev->hwdev;
394 
395 	/* queue depth must be a power of 2, otherwise it is rounded up */
396 	rq_depth = (nb_desc & (nb_desc - 1)) ?
397 		((u16)(1U << (ilog2(nb_desc) + 1))) : nb_desc;
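
	/*
	 * For instance, nb_desc = 1000 is not a power of 2 and is rounded up
	 * to rq_depth = 1 << (ilog2(1000) + 1) = 1024, while nb_desc = 512 is
	 * kept unchanged.
	 */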
398 
399 	/*
400 	 * Validate number of receive descriptors.
401 	 * It must not exceed hardware maximum and minimum.
402 	 */
403 	if (rq_depth > HINIC_MAX_QUEUE_DEPTH ||
404 		rq_depth < HINIC_MIN_QUEUE_DEPTH) {
405 		PMD_DRV_LOG(ERR, "RX queue depth is out of range from %d to %d, (nb_desc=%d, q_depth=%d, port=%d queue=%d)",
406 			    HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH,
407 			    (int)nb_desc, (int)rq_depth,
408 			    (int)dev->data->port_id, (int)queue_idx);
409 		return -EINVAL;
410 	}
411 
412 	/*
413 	 * The RX descriptor ring will be cleaned after rxq->rx_free_thresh
414 	 * descriptors are used or if the number of descriptors required
415 	 * to receive a packet is greater than the number of free RX
416 	 * descriptors.
417 	 * The following constraints must be satisfied:
418 	 *  rx_free_thresh must be greater than 0.
419 	 *  rx_free_thresh must be less than the size of the ring minus 1.
420 	 * When set to zero use default values.
421 	 */
422 	rx_free_thresh = (u16)((rx_conf->rx_free_thresh) ?
423 			rx_conf->rx_free_thresh : HINIC_DEFAULT_RX_FREE_THRESH);
424 	if (rx_free_thresh >= (rq_depth - 1)) {
425 		PMD_DRV_LOG(ERR, "rx_free_thresh must be less than the number of RX descriptors minus 1. (rx_free_thresh=%u port=%d queue=%d)",
426 			    (unsigned int)rx_free_thresh,
427 			    (int)dev->data->port_id,
428 			    (int)queue_idx);
429 		return -EINVAL;
430 	}
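
	/*
	 * With the default 1024-entry ring, for example, any rx_free_thresh
	 * from 1 to 1022 passes this check; a value of 0 in rx_conf selects
	 * HINIC_DEFAULT_RX_FREE_THRESH above.
	 */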
431 
432 	rxq = rte_zmalloc_socket("hinic_rx_queue", sizeof(struct hinic_rxq),
433 				 RTE_CACHE_LINE_SIZE, socket_id);
434 	if (!rxq) {
435 		PMD_DRV_LOG(ERR, "Allocate rxq[%d] failed, dev_name: %s",
436 			    queue_idx, dev->data->name);
437 		return -ENOMEM;
438 	}
439 	nic_dev->rxqs[queue_idx] = rxq;
440 
441 	/* alloc rx rq hw wqe page */
442 	rc = hinic_create_rq(hwdev, queue_idx, rq_depth);
443 	if (rc) {
444 		PMD_DRV_LOG(ERR, "Create rxq[%d] failed, dev_name: %s, rq_depth: %d",
445 			    queue_idx, dev->data->name, rq_depth);
446 		goto create_rq_fail;
447 	}
448 
449 	/* mbuf pool must be assigned before setting up rx resources */
450 	rxq->mb_pool = mp;
451 
452 	rc =
453 	hinic_convert_rx_buf_size(rte_pktmbuf_data_room_size(rxq->mb_pool) -
454 				  RTE_PKTMBUF_HEADROOM, &buf_size);
455 	if (rc) {
456 		PMD_DRV_LOG(ERR, "Adjust buf size failed, dev_name: %s",
457 			    dev->data->name);
458 		goto adjust_bufsize_fail;
459 	}
460 
461 	/* rx queue info, rearm control */
462 	rxq->wq = &hwdev->nic_io->rq_wq[queue_idx];
463 	rxq->pi_virt_addr = hwdev->nic_io->qps[queue_idx].rq.pi_virt_addr;
464 	rxq->nic_dev = nic_dev;
465 	rxq->q_id = queue_idx;
466 	rxq->q_depth = rq_depth;
467 	rxq->buf_len = (u16)buf_size;
468 	rxq->rx_free_thresh = rx_free_thresh;
469 
470 	/* the last part of the ring cannot do mbuf rearm in bulk */
471 	rxq->rxinfo_align_end = rxq->q_depth - rxq->rx_free_thresh;
472 
473 	/* device port identifier */
474 	rxq->port_id = dev->data->port_id;
475 
476 	/* alloc rx_cqe and prepare rq_wqe */
477 	rc = hinic_setup_rx_resources(rxq);
478 	if (rc) {
479 		PMD_DRV_LOG(ERR, "Setup rxq[%d] rx_resources failed, dev_name:%s",
480 			    queue_idx, dev->data->name);
481 		goto setup_rx_res_err;
482 	}
483 
484 	/* record nic_dev rxq in rte_eth rx_queues */
485 	dev->data->rx_queues[queue_idx] = rxq;
486 
487 	return 0;
488 
489 setup_rx_res_err:
490 adjust_bufsize_fail:
491 	hinic_destroy_rq(hwdev, queue_idx);
492 
493 create_rq_fail:
494 	rte_free(rxq);
495 
496 	return rc;
497 }
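
/*
 * Note on mempool sizing: a pool created with the stock
 * RTE_MBUF_DEFAULT_BUF_SIZE (2176 bytes) leaves 2048 bytes of data room
 * after the 128-byte RTE_PKTMBUF_HEADROOM, which is the value that
 * hinic_convert_rx_buf_size() above adjusts to a supported hardware buffer
 * size. A sketch of such a setup (names and sizes are illustrative):
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("hinic_rx_pool",
 *			8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
 *			rte_socket_id());
 *	rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(), NULL, mp);
 */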
498 
499 static void hinic_reset_rx_queue(struct rte_eth_dev *dev)
500 {
501 	struct hinic_rxq *rxq;
502 	struct hinic_nic_dev *nic_dev;
503 	int q_id = 0;
504 
505 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
506 
507 	for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
508 		rxq = dev->data->rx_queues[q_id];
509 
510 		rxq->wq->cons_idx = 0;
511 		rxq->wq->prod_idx = 0;
512 		rxq->wq->delta = rxq->q_depth;
513 		rxq->wq->mask = rxq->q_depth - 1;
514 
515 		/* alloc mbuf to rq */
516 		hinic_rx_alloc_pkts(rxq);
517 	}
518 }
519 
520 /**
521  * DPDK callback to configure the transmit queue.
522  *
523  * @param dev
524  *   Pointer to Ethernet device structure.
525  * @param queue_idx
526  *   Transmit queue index.
527  * @param nb_desc
528  *   Number of descriptors for transmit queue.
529  * @param socket_id
530  *   NUMA socket on which memory must be allocated.
531  * @param tx_conf
532  *   Tx queue configuration parameters.
533  *
534  * @return
535  *   0 on success, negative error value otherwise.
536  */
537 static int hinic_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
538 			 uint16_t nb_desc, unsigned int socket_id,
539 			 __rte_unused const struct rte_eth_txconf *tx_conf)
540 {
541 	int rc;
542 	struct hinic_nic_dev *nic_dev;
543 	struct hinic_hwdev *hwdev;
544 	struct hinic_txq *txq;
545 	u16 sq_depth, tx_free_thresh;
546 
547 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
548 	hwdev = nic_dev->hwdev;
549 
550 	/* queue depth must be a power of 2, otherwise it is rounded up */
551 	sq_depth = (nb_desc & (nb_desc - 1)) ?
552 			((u16)(1U << (ilog2(nb_desc) + 1))) : nb_desc;
553 
554 	/*
555 	 * Validate number of transmit descriptors.
556 	 * It must not exceed hardware maximum and minimum.
557 	 */
558 	if (sq_depth > HINIC_MAX_QUEUE_DEPTH ||
559 		sq_depth < HINIC_MIN_QUEUE_DEPTH) {
560 		PMD_DRV_LOG(ERR, "TX queue depth is out of range from %d to %d, (nb_desc=%d, q_depth=%d, port=%d queue=%d)",
561 			  HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH,
562 			  (int)nb_desc, (int)sq_depth,
563 			  (int)dev->data->port_id, (int)queue_idx);
564 		return -EINVAL;
565 	}
566 
567 	/*
568 	 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
569 	 * descriptors are used or if the number of descriptors required
570 	 * to transmit a packet is greater than the number of free TX
571 	 * descriptors.
572 	 * The following constraints must be satisfied:
573 	 *  tx_free_thresh must be greater than 0.
574 	 *  tx_free_thresh must be less than the size of the ring minus 1.
575 	 * When set to zero use default values.
576 	 */
577 	tx_free_thresh = (u16)((tx_conf->tx_free_thresh) ?
578 			tx_conf->tx_free_thresh : HINIC_DEFAULT_TX_FREE_THRESH);
579 	if (tx_free_thresh >= (sq_depth - 1)) {
580 		PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the number of TX descriptors minus 1. (tx_free_thresh=%u port=%d queue=%d)",
581 			(unsigned int)tx_free_thresh, (int)dev->data->port_id,
582 			(int)queue_idx);
583 		return -EINVAL;
584 	}
585 
586 	txq = rte_zmalloc_socket("hinic_tx_queue", sizeof(struct hinic_txq),
587 				 RTE_CACHE_LINE_SIZE, socket_id);
588 	if (!txq) {
589 		PMD_DRV_LOG(ERR, "Allocate txq[%d] failed, dev_name: %s",
590 			    queue_idx, dev->data->name);
591 		return -ENOMEM;
592 	}
593 	nic_dev->txqs[queue_idx] = txq;
594 
595 	/* alloc tx sq hw wqe page */
596 	rc = hinic_create_sq(hwdev, queue_idx, sq_depth);
597 	if (rc) {
598 		PMD_DRV_LOG(ERR, "Create txq[%d] failed, dev_name: %s, sq_depth: %d",
599 			    queue_idx, dev->data->name, sq_depth);
600 		goto create_sq_fail;
601 	}
602 
603 	txq->q_id = queue_idx;
604 	txq->q_depth = sq_depth;
605 	txq->port_id = dev->data->port_id;
606 	txq->tx_free_thresh = tx_free_thresh;
607 	txq->nic_dev = nic_dev;
608 	txq->wq = &hwdev->nic_io->sq_wq[queue_idx];
609 	txq->sq = &hwdev->nic_io->qps[queue_idx].sq;
610 	txq->cons_idx_addr = hwdev->nic_io->qps[queue_idx].sq.cons_idx_addr;
611 	txq->sq_head_addr = HINIC_GET_WQ_HEAD(txq);
612 	txq->sq_bot_sge_addr = HINIC_GET_WQ_TAIL(txq) -
613 					sizeof(struct hinic_sq_bufdesc);
614 	txq->cos = nic_dev->default_cos;
615 
616 	/* alloc software txinfo */
617 	rc = hinic_setup_tx_resources(txq);
618 	if (rc) {
619 		PMD_DRV_LOG(ERR, "Setup txq[%d] tx_resources failed, dev_name: %s",
620 			    queue_idx, dev->data->name);
621 		goto setup_tx_res_fail;
622 	}
623 
624 	/* record nic_dev txq in rte_eth tx_queues */
625 	dev->data->tx_queues[queue_idx] = txq;
626 
627 	return HINIC_OK;
628 
629 setup_tx_res_fail:
630 	hinic_destroy_sq(hwdev, queue_idx);
631 
632 create_sq_fail:
633 	rte_free(txq);
634 
635 	return rc;
636 }
637 
638 static void hinic_reset_tx_queue(struct rte_eth_dev *dev)
639 {
640 	struct hinic_nic_dev *nic_dev;
641 	struct hinic_txq *txq;
642 	struct hinic_nic_io *nic_io;
643 	struct hinic_hwdev *hwdev;
644 	volatile u32 *ci_addr;
645 	int q_id = 0;
646 
647 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
648 	hwdev = nic_dev->hwdev;
649 	nic_io = hwdev->nic_io;
650 
651 	for (q_id = 0; q_id < nic_dev->num_sq; q_id++) {
652 		txq = dev->data->tx_queues[q_id];
653 
654 		txq->wq->cons_idx = 0;
655 		txq->wq->prod_idx = 0;
656 		txq->wq->delta = txq->q_depth;
657 		txq->wq->mask  = txq->q_depth - 1;
658 
659 		/* clear hardware ci */
660 		ci_addr = (volatile u32 *)HINIC_CI_VADDR(nic_io->ci_vaddr_base,
661 							q_id);
662 		*ci_addr = 0;
663 	}
664 }
665 
666 /**
667  * Get link speed capabilities from the NIC.
668  *
669  * @param dev
670  *   Pointer to Ethernet device structure.
671  * @param speed_capa
672  *   Pointer to the link speed capability bitmap.
673  */
674 static void hinic_get_speed_capa(struct rte_eth_dev *dev, uint32_t *speed_capa)
675 {
676 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
677 	u32 supported_link, advertised_link;
678 	int err;
679 
680 #define HINIC_LINK_MODE_SUPPORT_1G	(1U << HINIC_GE_BASE_KX)
681 
682 #define HINIC_LINK_MODE_SUPPORT_10G	(1U << HINIC_10GE_BASE_KR)
683 
684 #define HINIC_LINK_MODE_SUPPORT_25G	((1U << HINIC_25GE_BASE_KR_S) | \
685 					(1U << HINIC_25GE_BASE_CR_S) | \
686 					(1U << HINIC_25GE_BASE_KR) | \
687 					(1U << HINIC_25GE_BASE_CR))
688 
689 #define HINIC_LINK_MODE_SUPPORT_40G	((1U << HINIC_40GE_BASE_KR4) | \
690 					(1U << HINIC_40GE_BASE_CR4))
691 
692 #define HINIC_LINK_MODE_SUPPORT_100G	((1U << HINIC_100GE_BASE_KR4) | \
693 					(1U << HINIC_100GE_BASE_CR4))
694 
695 	err = hinic_get_link_mode(nic_dev->hwdev,
696 				  &supported_link, &advertised_link);
697 	if (err || supported_link == HINIC_SUPPORTED_UNKNOWN ||
698 	    advertised_link == HINIC_SUPPORTED_UNKNOWN) {
699 		PMD_DRV_LOG(WARNING, "Get speed capability info failed, device: %s, port_id: %u",
700 			  nic_dev->proc_dev_name, dev->data->port_id);
701 	} else {
702 		*speed_capa = 0;
703 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_1G))
704 			*speed_capa |= ETH_LINK_SPEED_1G;
705 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_10G))
706 			*speed_capa |= ETH_LINK_SPEED_10G;
707 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_25G))
708 			*speed_capa |= ETH_LINK_SPEED_25G;
709 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_40G))
710 			*speed_capa |= ETH_LINK_SPEED_40G;
711 		if (!!(supported_link & HINIC_LINK_MODE_SUPPORT_100G))
712 			*speed_capa |= ETH_LINK_SPEED_100G;
713 	}
714 }
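
/*
 * For example, a 25GE mezzanine card whose firmware reports
 * HINIC_25GE_BASE_CR in supported_link matches HINIC_LINK_MODE_SUPPORT_25G
 * above, so ETH_LINK_SPEED_25G is advertised in dev_info.speed_capa.
 */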
715 
716 /**
717  * DPDK callback to get information about the device.
718  *
719  * @param dev
720  *   Pointer to Ethernet device structure.
721  * @param info
722  *   Pointer to Info structure output buffer.
723  */
724 static int
725 hinic_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
726 {
727 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
728 
729 	info->max_rx_queues  = nic_dev->nic_cap.max_rqs;
730 	info->max_tx_queues  = nic_dev->nic_cap.max_sqs;
731 	info->min_rx_bufsize = HINIC_MIN_RX_BUF_SIZE;
732 	info->max_rx_pktlen  = HINIC_MAX_JUMBO_FRAME_SIZE;
733 	info->max_mac_addrs  = HINIC_MAX_UC_MAC_ADDRS;
734 	info->min_mtu = HINIC_MIN_MTU_SIZE;
735 	info->max_mtu = HINIC_MAX_MTU_SIZE;
736 
737 	hinic_get_speed_capa(dev, &info->speed_capa);
738 	info->rx_queue_offload_capa = 0;
739 	info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
740 				DEV_RX_OFFLOAD_IPV4_CKSUM |
741 				DEV_RX_OFFLOAD_UDP_CKSUM |
742 				DEV_RX_OFFLOAD_TCP_CKSUM |
743 				DEV_RX_OFFLOAD_VLAN_FILTER |
744 				DEV_RX_OFFLOAD_SCATTER |
745 				DEV_RX_OFFLOAD_JUMBO_FRAME |
746 				DEV_RX_OFFLOAD_TCP_LRO |
747 				DEV_RX_OFFLOAD_RSS_HASH;
748 
749 	info->tx_queue_offload_capa = 0;
750 	info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
751 				DEV_TX_OFFLOAD_IPV4_CKSUM |
752 				DEV_TX_OFFLOAD_UDP_CKSUM |
753 				DEV_TX_OFFLOAD_TCP_CKSUM |
754 				DEV_TX_OFFLOAD_SCTP_CKSUM |
755 				DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
756 				DEV_TX_OFFLOAD_TCP_TSO |
757 				DEV_TX_OFFLOAD_MULTI_SEGS;
758 
759 	info->hash_key_size = HINIC_RSS_KEY_SIZE;
760 	info->reta_size = HINIC_RSS_INDIR_SIZE;
761 	info->flow_type_rss_offloads = HINIC_RSS_OFFLOAD_ALL;
762 	info->rx_desc_lim = hinic_rx_desc_lim;
763 	info->tx_desc_lim = hinic_tx_desc_lim;
764 
765 	/* Driver-preferred Rx/Tx parameters */
766 	info->default_rxportconf.burst_size = HINIC_DEFAULT_BURST_SIZE;
767 	info->default_txportconf.burst_size = HINIC_DEFAULT_BURST_SIZE;
768 	info->default_rxportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES;
769 	info->default_txportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES;
770 	info->default_rxportconf.ring_size = HINIC_DEFAULT_RING_SIZE;
771 	info->default_txportconf.ring_size = HINIC_DEFAULT_RING_SIZE;
772 
773 	return 0;
774 }
775 
776 static int hinic_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
777 				size_t fw_size)
778 {
779 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
780 	char fw_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0};
781 	int err;
782 
783 	err = hinic_get_mgmt_version(nic_dev->hwdev, fw_ver);
784 	if (err) {
785 		PMD_DRV_LOG(ERR, "Failed to get fw version\n");
786 		return -EINVAL;
787 	}
788 
789 	if (fw_size < strlen(fw_ver) + 1)
790 		return (strlen(fw_ver) + 1);
791 
792 	snprintf(fw_version, fw_size, "%s", fw_ver);
793 
794 	return 0;
795 }
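
/*
 * The truncation contract mirrors rte_eth_dev_fw_version_get(): if the
 * firmware reported, say, the hypothetical string "1.6.2.5", 8 bytes are
 * needed including the terminator, so a caller passing fw_size = 4 gets 8
 * as the return value and can retry with a large enough buffer.
 */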
796 
797 static int hinic_config_rx_mode(struct hinic_nic_dev *nic_dev, u32 rx_mode_ctrl)
798 {
799 	int err;
800 
801 	err = hinic_set_rx_mode(nic_dev->hwdev, rx_mode_ctrl);
802 	if (err) {
803 		PMD_DRV_LOG(ERR, "Failed to set rx mode");
804 		return -EINVAL;
805 	}
806 	nic_dev->rx_mode_status = rx_mode_ctrl;
807 
808 	return 0;
809 }
810 
811 
812 static int hinic_rxtx_configure(struct rte_eth_dev *dev)
813 {
814 	int err;
815 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
816 	bool lro_en;
817 
818 	/* rx configuration; if rss is enabled, init the default rss configuration */
819 	err = hinic_rx_configure(dev);
820 	if (err) {
821 		PMD_DRV_LOG(ERR, "Configure rss failed");
822 		return err;
823 	}
824 
825 	/* rx mode init */
826 	err = hinic_config_rx_mode(nic_dev, HINIC_DEFAULT_RX_MODE);
827 	if (err) {
828 		PMD_DRV_LOG(ERR, "Configure rx_mode:0x%x failed",
829 			HINIC_DEFAULT_RX_MODE);
830 		goto set_rx_mode_fail;
831 	}
832 
833 	/* config lro */
834 	lro_en = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO ?
835 			true : false;
836 
837 	err = hinic_set_rx_lro(nic_dev->hwdev, lro_en, lro_en,
838 				HINIC_LRO_WQE_NUM_DEFAULT);
839 	if (err) {
840 		PMD_DRV_LOG(ERR, "%s lro failed, err: %d",
841 			lro_en ? "Enable" : "Disable", err);
842 		goto set_rx_mode_fail;
843 	}
844 
845 	return HINIC_OK;
846 
847 set_rx_mode_fail:
848 	hinic_rx_remove_configure(dev);
849 
850 	return err;
851 }
852 
853 static void hinic_remove_rxtx_configure(struct rte_eth_dev *dev)
854 {
855 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
856 
857 	(void)hinic_config_rx_mode(nic_dev, 0);
858 	hinic_rx_remove_configure(dev);
859 }
860 
861 static int hinic_priv_get_dev_link_status(struct hinic_nic_dev *nic_dev,
862 					  struct rte_eth_link *link)
863 {
864 	int rc;
865 	u8 port_link_status = 0;
866 	struct nic_port_info port_link_info;
867 	struct hinic_hwdev *nic_hwdev = nic_dev->hwdev;
868 	uint32_t port_speed[LINK_SPEED_MAX] = {ETH_SPEED_NUM_10M,
869 					ETH_SPEED_NUM_100M, ETH_SPEED_NUM_1G,
870 					ETH_SPEED_NUM_10G, ETH_SPEED_NUM_25G,
871 					ETH_SPEED_NUM_40G, ETH_SPEED_NUM_100G};
872 
873 	rc = hinic_get_link_status(nic_hwdev, &port_link_status);
874 	if (rc)
875 		return rc;
876 
877 	if (!port_link_status) {
878 		link->link_status = ETH_LINK_DOWN;
879 		link->link_speed = 0;
880 		link->link_duplex = ETH_LINK_HALF_DUPLEX;
881 		link->link_autoneg = ETH_LINK_FIXED;
882 		return HINIC_OK;
883 	}
884 
885 	memset(&port_link_info, 0, sizeof(port_link_info));
886 	rc = hinic_get_port_info(nic_hwdev, &port_link_info);
887 	if (rc)
888 		return rc;
889 
890 	link->link_speed = port_speed[port_link_info.speed % LINK_SPEED_MAX];
891 	link->link_duplex = port_link_info.duplex;
892 	link->link_autoneg = port_link_info.autoneg_state;
893 	link->link_status = port_link_status;
894 
895 	return HINIC_OK;
896 }
897 
898 /**
899  * DPDK callback to retrieve physical link information.
900  *
901  * @param dev
902  *   Pointer to Ethernet device structure.
903  * @param wait_to_complete
904  *   Wait for request completion.
905  *
906  * @return
907  *   0 link status changed, -1 link status not changed
908  */
909 static int hinic_link_update(struct rte_eth_dev *dev, int wait_to_complete)
910 {
911 #define CHECK_INTERVAL 10  /* 10ms */
912 #define MAX_REPEAT_TIME 100  /* 1s (100 * 10ms) in total */
913 	int rc = HINIC_OK;
914 	struct rte_eth_link link;
915 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
916 	unsigned int rep_cnt = MAX_REPEAT_TIME;
917 
918 	memset(&link, 0, sizeof(link));
919 	do {
920 		/* Get link status information from hardware */
921 		rc = hinic_priv_get_dev_link_status(nic_dev, &link);
922 		if (rc != HINIC_OK) {
923 			link.link_speed = ETH_SPEED_NUM_NONE;
924 			link.link_duplex = ETH_LINK_FULL_DUPLEX;
925 			PMD_DRV_LOG(ERR, "Get link status failed");
926 			goto out;
927 		}
928 
929 		if (!wait_to_complete || link.link_status)
930 			break;
931 
932 		rte_delay_ms(CHECK_INTERVAL);
933 	} while (rep_cnt--);
934 
935 out:
936 	rc = rte_eth_linkstatus_set(dev, &link);
937 	return rc;
938 }
939 
940 /**
941  * DPDK callback to bring the link UP.
942  *
943  * @param dev
944  *   Pointer to Ethernet device structure.
945  *
946  * @return
947  *   0 on success, negative errno value on failure.
948  */
949 static int hinic_dev_set_link_up(struct rte_eth_dev *dev)
950 {
951 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
952 	int ret;
953 
954 	ret = hinic_set_xsfp_tx_status(nic_dev->hwdev, true);
955 	if (ret) {
956 		PMD_DRV_LOG(ERR, "Enable port tx xsfp failed, dev_name: %s, port_id: %d",
957 			    nic_dev->proc_dev_name, dev->data->port_id);
958 		return ret;
959 	}
960 
961 	/* link status follows phy port status; bringing the port up opens the pma */
962 	ret = hinic_set_port_enable(nic_dev->hwdev, true);
963 	if (ret)
964 		PMD_DRV_LOG(ERR, "Set mac link up failed, dev_name: %s, port_id: %d",
965 			    nic_dev->proc_dev_name, dev->data->port_id);
966 
967 	return ret;
968 }
969 
970 /**
971  * DPDK callback to bring the link DOWN.
972  *
973  * @param dev
974  *   Pointer to Ethernet device structure.
975  *
976  * @return
977  *   0 on success, negative errno value on failure.
978  */
979 static int hinic_dev_set_link_down(struct rte_eth_dev *dev)
980 {
981 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
982 	int ret;
983 
984 	ret = hinic_set_xsfp_tx_status(nic_dev->hwdev, false);
985 	if (ret) {
986 		PMD_DRV_LOG(ERR, "Disable port tx xsfp failed, dev_name: %s, port_id: %d",
987 			    nic_dev->proc_dev_name, dev->data->port_id);
988 		return ret;
989 	}
990 
991 	/* link status follows phy port status; bringing the port down closes the pma */
992 	ret = hinic_set_port_enable(nic_dev->hwdev, false);
993 	if (ret)
994 		PMD_DRV_LOG(ERR, "Set mac link down failed, dev_name: %s, port_id: %d",
995 			    nic_dev->proc_dev_name, dev->data->port_id);
996 
997 	return ret;
998 }
999 
1000 /**
1001  * DPDK callback to start the device.
1002  *
1003  * @param dev
1004  *   Pointer to Ethernet device structure.
1005  *
1006  * @return
1007  *   0 on success, negative errno value on failure.
1008  */
1009 static int hinic_dev_start(struct rte_eth_dev *dev)
1010 {
1011 	int rc;
1012 	char *name;
1013 	struct hinic_nic_dev *nic_dev;
1014 
1015 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1016 	name = dev->data->name;
1017 
1018 	/* reset rx and tx queue */
1019 	hinic_reset_rx_queue(dev);
1020 	hinic_reset_tx_queue(dev);
1021 
1022 	/* get func rx buf size */
1023 	hinic_get_func_rx_buf_size(nic_dev);
1024 
1025 	/* init txq and rxq context */
1026 	rc = hinic_init_qp_ctxts(nic_dev->hwdev);
1027 	if (rc) {
1028 		PMD_DRV_LOG(ERR, "Initialize qp context failed, dev_name:%s",
1029 			    name);
1030 		goto init_qp_fail;
1031 	}
1032 
1033 	/* rss template */
1034 	rc = hinic_config_mq_mode(dev, TRUE);
1035 	if (rc) {
1036 		PMD_DRV_LOG(ERR, "Configure mq mode failed, dev_name: %s",
1037 			    name);
1038 		goto cfg_mq_mode_fail;
1039 	}
1040 
1041 	/* set default mtu */
1042 	rc = hinic_set_port_mtu(nic_dev->hwdev, nic_dev->mtu_size);
1043 	if (rc) {
1044 		PMD_DRV_LOG(ERR, "Set mtu_size[%d] failed, dev_name: %s",
1045 			    nic_dev->mtu_size, name);
1046 		goto set_mtu_fail;
1047 	}
1048 
1049 	/* configure rss, rx_mode and other default rx/tx features */
1050 	rc = hinic_rxtx_configure(dev);
1051 	if (rc) {
1052 		PMD_DRV_LOG(ERR, "Configure tx and rx failed, dev_name: %s",
1053 			    name);
1054 		goto cfg_rxtx_fail;
1055 	}
1056 
1057 	/* reactivate pf status so that the uP reports async events */
1058 	hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_ACTIVE_FLAG);
1059 
1060 	/* open the virtual port and get ready to receive packets */
1061 	rc = hinic_set_vport_enable(nic_dev->hwdev, true);
1062 	if (rc) {
1063 		PMD_DRV_LOG(ERR, "Enable vport failed, dev_name:%s", name);
1064 		goto en_vport_fail;
1065 	}
1066 
1067 	/* open physical port and start packet receiving */
1068 	rc = hinic_set_port_enable(nic_dev->hwdev, true);
1069 	if (rc) {
1070 		PMD_DRV_LOG(ERR, "Enable physical port failed, dev_name:%s",
1071 			    name);
1072 		goto en_port_fail;
1073 	}
1074 
1075 	/* update eth_dev link status */
1076 	if (dev->data->dev_conf.intr_conf.lsc != 0)
1077 		(void)hinic_link_update(dev, 0);
1078 
1079 	hinic_set_bit(HINIC_DEV_START, &nic_dev->dev_status);
1080 
1081 	return 0;
1082 
1083 en_port_fail:
1084 	(void)hinic_set_vport_enable(nic_dev->hwdev, false);
1085 
1086 en_vport_fail:
1087 	hinic_set_pf_status(nic_dev->hwdev->hwif, HINIC_PF_STATUS_INIT);
1088 
1089 	/* Flush tx and rx chip resources in case the vport enable failed partway */
1090 	(void)hinic_flush_qp_res(nic_dev->hwdev);
1091 	rte_delay_ms(100);
1092 
1093 	hinic_remove_rxtx_configure(dev);
1094 
1095 cfg_rxtx_fail:
1096 set_mtu_fail:
1097 cfg_mq_mode_fail:
1098 	hinic_free_qp_ctxts(nic_dev->hwdev);
1099 
1100 init_qp_fail:
1101 	hinic_free_all_rx_mbuf(dev);
1102 	hinic_free_all_tx_mbuf(dev);
1103 
1104 	return rc;
1105 }
1106 
1107 /**
1108  * DPDK callback to release the receive queue.
1109  *
1110  * @param queue
1111  *   Generic receive queue pointer.
1112  */
1113 static void hinic_rx_queue_release(void *queue)
1114 {
1115 	struct hinic_rxq *rxq = queue;
1116 	struct hinic_nic_dev *nic_dev;
1117 
1118 	if (!rxq) {
1119 		PMD_DRV_LOG(WARNING, "Rxq is null when release");
1120 		return;
1121 	}
1122 	nic_dev = rxq->nic_dev;
1123 
1124 	/* free rxq_pkt mbuf */
1125 	hinic_free_all_rx_mbufs(rxq);
1126 
1127 	/* free rxq_cqe, rxq_info */
1128 	hinic_free_rx_resources(rxq);
1129 
1130 	/* free root rq wq */
1131 	hinic_destroy_rq(nic_dev->hwdev, rxq->q_id);
1132 
1133 	nic_dev->rxqs[rxq->q_id] = NULL;
1134 
1135 	/* free rxq */
1136 	rte_free(rxq);
1137 }
1138 
1139 /**
1140  * DPDK callback to release the transmit queue.
1141  *
1142  * @param queue
1143  *   Generic transmit queue pointer.
1144  */
1145 static void hinic_tx_queue_release(void *queue)
1146 {
1147 	struct hinic_txq *txq = queue;
1148 	struct hinic_nic_dev *nic_dev;
1149 
1150 	if (!txq) {
1151 		PMD_DRV_LOG(WARNING, "Txq is null when release");
1152 		return;
1153 	}
1154 	nic_dev = txq->nic_dev;
1155 
1156 	/* free txq_pkt mbuf */
1157 	hinic_free_all_tx_mbufs(txq);
1158 
1159 	/* free txq_info */
1160 	hinic_free_tx_resources(txq);
1161 
1162 	/* free root sq wq */
1163 	hinic_destroy_sq(nic_dev->hwdev, txq->q_id);
1164 	nic_dev->txqs[txq->q_id] = NULL;
1165 
1166 	/* free txq */
1167 	rte_free(txq);
1168 }
1169 
1170 static void hinic_free_all_rq(struct hinic_nic_dev *nic_dev)
1171 {
1172 	u16 q_id;
1173 
1174 	for (q_id = 0; q_id < nic_dev->num_rq; q_id++)
1175 		hinic_destroy_rq(nic_dev->hwdev, q_id);
1176 }
1177 
1178 static void hinic_free_all_sq(struct hinic_nic_dev *nic_dev)
1179 {
1180 	u16 q_id;
1181 
1182 	for (q_id = 0; q_id < nic_dev->num_sq; q_id++)
1183 		hinic_destroy_sq(nic_dev->hwdev, q_id);
1184 }
1185 
1186 /**
1187  * DPDK callback to stop the device.
1188  *
1189  * @param dev
1190  *   Pointer to Ethernet device structure.
1191  */
1192 static void hinic_dev_stop(struct rte_eth_dev *dev)
1193 {
1194 	int rc;
1195 	char *name;
1196 	uint16_t port_id;
1197 	struct hinic_nic_dev *nic_dev;
1198 	struct rte_eth_link link;
1199 
1200 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1201 	name = dev->data->name;
1202 	port_id = dev->data->port_id;
1203 
1204 	if (!hinic_test_and_clear_bit(HINIC_DEV_START, &nic_dev->dev_status)) {
1205 		PMD_DRV_LOG(INFO, "Device %s already stopped", name);
1206 		return;
1207 	}
1208 
1209 	/* just stop phy port and vport */
1210 	rc = hinic_set_port_enable(nic_dev->hwdev, false);
1211 	if (rc)
1212 		PMD_DRV_LOG(WARNING, "Disable phy port failed, error: %d, dev_name:%s, port_id:%d",
1213 			  rc, name, port_id);
1214 
1215 	rc = hinic_set_vport_enable(nic_dev->hwdev, false);
1216 	if (rc)
1217 		PMD_DRV_LOG(WARNING, "Disable vport failed, error: %d, dev_name:%s, port_id:%d",
1218 			  rc, name, port_id);
1219 
1220 	/* Clear recorded link status */
1221 	memset(&link, 0, sizeof(link));
1222 	(void)rte_eth_linkstatus_set(dev, &link);
1223 
1224 	/* flush pending io request */
1225 	rc = hinic_rx_tx_flush(nic_dev->hwdev);
1226 	if (rc)
1227 		PMD_DRV_LOG(WARNING, "Flush pending io failed, error: %d, dev_name: %s, port_id: %d",
1228 			    rc, name, port_id);
1229 
1230 	/* clean rss table and rx_mode */
1231 	hinic_remove_rxtx_configure(dev);
1232 
1233 	/* clean root context */
1234 	hinic_free_qp_ctxts(nic_dev->hwdev);
1235 
1236 	hinic_free_fdir_filter(nic_dev);
1237 
1238 	/* free mbuf */
1239 	hinic_free_all_rx_mbuf(dev);
1240 	hinic_free_all_tx_mbuf(dev);
1241 }
1242 
1243 static void hinic_disable_interrupt(struct rte_eth_dev *dev)
1244 {
1245 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1246 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1247 	int ret, retries = 0;
1248 
1249 	hinic_clear_bit(HINIC_DEV_INTR_EN, &nic_dev->dev_status);
1250 
1251 	/* disable msix interrupt in hardware */
1252 	hinic_set_msix_state(nic_dev->hwdev, 0, HINIC_MSIX_DISABLE);
1253 
1254 	/* disable rte interrupt */
1255 	ret = rte_intr_disable(&pci_dev->intr_handle);
1256 	if (ret)
1257 		PMD_DRV_LOG(ERR, "Disable intr failed: %d", ret);
1258 
1259 	do {
1260 		ret =
1261 		rte_intr_callback_unregister(&pci_dev->intr_handle,
1262 					     hinic_dev_interrupt_handler, dev);
1263 		if (ret >= 0) {
1264 			break;
1265 		} else if (ret == -EAGAIN) {
1266 			rte_delay_ms(100);
1267 			retries++;
1268 		} else {
1269 			PMD_DRV_LOG(ERR, "intr callback unregister failed: %d",
1270 				    ret);
1271 			break;
1272 		}
1273 	} while (retries < HINIC_INTR_CB_UNREG_MAX_RETRIES);
1274 
1275 	if (retries == HINIC_INTR_CB_UNREG_MAX_RETRIES)
1276 		PMD_DRV_LOG(ERR, "Unregister intr callback failed after %d retries",
1277 			    retries);
1278 }
1279 
1280 static int hinic_set_dev_promiscuous(struct hinic_nic_dev *nic_dev, bool enable)
1281 {
1282 	u32 rx_mode_ctrl = nic_dev->rx_mode_status;
1283 
1284 	if (enable)
1285 		rx_mode_ctrl |= HINIC_RX_MODE_PROMISC;
1286 	else
1287 		rx_mode_ctrl &= (~HINIC_RX_MODE_PROMISC);
1288 
1289 	return hinic_config_rx_mode(nic_dev, rx_mode_ctrl);
1290 }
1291 
1292 /**
1293  * DPDK callback to get device statistics.
1294  *
1295  * @param dev
1296  *   Pointer to Ethernet device structure.
1297  * @param stats
1298  *   Stats structure output buffer.
1299  *
1300  * @return
1301  *   0 on success and stats is filled,
1302  *   negative error value otherwise.
1303  */
1304 static int
1305 hinic_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1306 {
1307 	int i, err, q_num;
1308 	u64 rx_discards_pmd = 0;
1309 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1310 	struct hinic_vport_stats vport_stats;
1311 	struct hinic_rxq	*rxq = NULL;
1312 	struct hinic_rxq_stats rxq_stats;
1313 	struct hinic_txq	*txq = NULL;
1314 	struct hinic_txq_stats txq_stats;
1315 
1316 	err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats);
1317 	if (err) {
1318 		PMD_DRV_LOG(ERR, "Get vport stats from fw failed, nic_dev: %s",
1319 			nic_dev->proc_dev_name);
1320 		return err;
1321 	}
1322 
1323 	/* rx queue stats */
1324 	q_num = (nic_dev->num_rq < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
1325 			nic_dev->num_rq : RTE_ETHDEV_QUEUE_STAT_CNTRS;
1326 	for (i = 0; i < q_num; i++) {
1327 		rxq = nic_dev->rxqs[i];
1328 		hinic_rxq_get_stats(rxq, &rxq_stats);
1329 		stats->q_ipackets[i] = rxq_stats.packets;
1330 		stats->q_ibytes[i] = rxq_stats.bytes;
1331 		stats->q_errors[i] = rxq_stats.rx_discards;
1332 
1333 		stats->ierrors += rxq_stats.errors;
1334 		rx_discards_pmd += rxq_stats.rx_discards;
1335 		dev->data->rx_mbuf_alloc_failed += rxq_stats.rx_nombuf;
1336 	}
1337 
1338 	/* tx queue stats */
1339 	q_num = (nic_dev->num_sq < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
1340 		nic_dev->num_sq : RTE_ETHDEV_QUEUE_STAT_CNTRS;
1341 	for (i = 0; i < q_num; i++) {
1342 		txq = nic_dev->txqs[i];
1343 		hinic_txq_get_stats(txq, &txq_stats);
1344 		stats->q_opackets[i] = txq_stats.packets;
1345 		stats->q_obytes[i] = txq_stats.bytes;
1346 		stats->oerrors += (txq_stats.tx_busy + txq_stats.off_errs);
1347 	}
1348 
1349 	/* vport stats */
1350 	stats->oerrors += vport_stats.tx_discard_vport;
1351 
1352 	stats->imissed = vport_stats.rx_discard_vport + rx_discards_pmd;
1353 
1354 	stats->ipackets = (vport_stats.rx_unicast_pkts_vport +
1355 			vport_stats.rx_multicast_pkts_vport +
1356 			vport_stats.rx_broadcast_pkts_vport -
1357 			rx_discards_pmd);
1358 
1359 	stats->opackets = (vport_stats.tx_unicast_pkts_vport +
1360 			vport_stats.tx_multicast_pkts_vport +
1361 			vport_stats.tx_broadcast_pkts_vport);
1362 
1363 	stats->ibytes = (vport_stats.rx_unicast_bytes_vport +
1364 			vport_stats.rx_multicast_bytes_vport +
1365 			vport_stats.rx_broadcast_bytes_vport);
1366 
1367 	stats->obytes = (vport_stats.tx_unicast_bytes_vport +
1368 			vport_stats.tx_multicast_bytes_vport +
1369 			vport_stats.tx_broadcast_bytes_vport);
1370 	return 0;
1371 }
1372 
1373 /**
1374  * DPDK callback to clear device statistics.
1375  *
1376  * @param dev
1377  *   Pointer to Ethernet device structure.
1378  */
1379 static int hinic_dev_stats_reset(struct rte_eth_dev *dev)
1380 {
1381 	int qid;
1382 	struct hinic_rxq	*rxq = NULL;
1383 	struct hinic_txq	*txq = NULL;
1384 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1385 	int ret;
1386 
1387 	ret = hinic_clear_vport_stats(nic_dev->hwdev);
1388 	if (ret != 0)
1389 		return ret;
1390 
1391 	for (qid = 0; qid < nic_dev->num_rq; qid++) {
1392 		rxq = nic_dev->rxqs[qid];
1393 		hinic_rxq_stats_reset(rxq);
1394 	}
1395 
1396 	for (qid = 0; qid < nic_dev->num_sq; qid++) {
1397 		txq = nic_dev->txqs[qid];
1398 		hinic_txq_stats_reset(txq);
1399 	}
1400 
1401 	return 0;
1402 }
1403 
1404 /**
1405  * DPDK callback to clear device extended statistics.
1406  *
1407  * @param dev
1408  *   Pointer to Ethernet device structure.
1409  */
1410 static int hinic_dev_xstats_reset(struct rte_eth_dev *dev)
1411 {
1412 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1413 	int ret;
1414 
1415 	ret = hinic_dev_stats_reset(dev);
1416 	if (ret != 0)
1417 		return ret;
1418 
1419 	if (hinic_func_type(nic_dev->hwdev) != TYPE_VF) {
1420 		ret = hinic_clear_phy_port_stats(nic_dev->hwdev);
1421 		if (ret != 0)
1422 			return ret;
1423 	}
1424 
1425 	return 0;
1426 }
1427 
1428 static void hinic_gen_random_mac_addr(struct rte_ether_addr *mac_addr)
1429 {
1430 	uint64_t random_value;
1431 
1432 	/* Set Organizationally Unique Identifier (OUI) prefix */
1433 	mac_addr->addr_bytes[0] = 0x00;
1434 	mac_addr->addr_bytes[1] = 0x09;
1435 	mac_addr->addr_bytes[2] = 0xC0;
1436 	/* Force indication of locally assigned MAC address. */
1437 	mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
1438 	/* Generate the last 3 bytes of the MAC address with a random number. */
1439 	random_value = rte_rand();
1440 	memcpy(&mac_addr->addr_bytes[3], &random_value, 3);
1441 }
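
/*
 * The generated address therefore looks like 02:09:c0:xx:xx:xx, where the
 * first octet is 0x00 | RTE_ETHER_LOCAL_ADMIN_ADDR (0x02), marking it as
 * locally administered, and the last three octets come from rte_rand().
 */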
1442 
1443 /**
1444  * Init mac_vlan table in NIC.
1445  *
1446  * @param dev
1447  *   Pointer to Ethernet device structure.
1448  *
1449  * @return
1450  *   0 on success and stats is filled,
1451  *   negative error value otherwise.
1452  */
1453 static int hinic_init_mac_addr(struct rte_eth_dev *eth_dev)
1454 {
1455 	struct hinic_nic_dev *nic_dev =
1456 				HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
1457 	uint8_t addr_bytes[RTE_ETHER_ADDR_LEN];
1458 	u16 func_id = 0;
1459 	int rc = 0;
1460 
1461 	rc = hinic_get_default_mac(nic_dev->hwdev, addr_bytes);
1462 	if (rc)
1463 		return rc;
1464 
1465 	rte_ether_addr_copy((struct rte_ether_addr *)addr_bytes,
1466 		&eth_dev->data->mac_addrs[0]);
1467 	if (rte_is_zero_ether_addr(&eth_dev->data->mac_addrs[0]))
1468 		hinic_gen_random_mac_addr(&eth_dev->data->mac_addrs[0]);
1469 
1470 	func_id = hinic_global_func_id(nic_dev->hwdev);
1471 	rc = hinic_set_mac(nic_dev->hwdev,
1472 			eth_dev->data->mac_addrs[0].addr_bytes,
1473 			0, func_id);
1474 	if (rc && rc != HINIC_PF_SET_VF_ALREADY)
1475 		return rc;
1476 
1477 	rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
1478 			&nic_dev->default_addr);
1479 
1480 	return 0;
1481 }
1482 
1483 static void hinic_delete_mc_addr_list(struct hinic_nic_dev *nic_dev)
1484 {
1485 	u16 func_id;
1486 	u32 i;
1487 
1488 	func_id = hinic_global_func_id(nic_dev->hwdev);
1489 
1490 	for (i = 0; i < HINIC_MAX_MC_MAC_ADDRS; i++) {
1491 		if (rte_is_zero_ether_addr(&nic_dev->mc_list[i]))
1492 			break;
1493 
1494 		hinic_del_mac(nic_dev->hwdev, nic_dev->mc_list[i].addr_bytes,
1495 			      0, func_id);
1496 		memset(&nic_dev->mc_list[i], 0, sizeof(struct rte_ether_addr));
1497 	}
1498 }
1499 
1500 /**
1501  * Deinit mac_vlan table in NIC.
1502  *
1503  * Removes every unicast entry and the multicast address list from
1504  * the NIC mac_vlan table.
1505  *
1506  * @param eth_dev
1507  *   Pointer to Ethernet device structure.
1509  */
1510 static void hinic_deinit_mac_addr(struct rte_eth_dev *eth_dev)
1511 {
1512 	struct hinic_nic_dev *nic_dev =
1513 				HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
1514 	u16 func_id = 0;
1515 	int rc;
1516 	int i;
1517 
1518 	func_id = hinic_global_func_id(nic_dev->hwdev);
1519 
1520 	for (i = 0; i < HINIC_MAX_UC_MAC_ADDRS; i++) {
1521 		if (rte_is_zero_ether_addr(&eth_dev->data->mac_addrs[i]))
1522 			continue;
1523 
1524 		rc = hinic_del_mac(nic_dev->hwdev,
1525 				   eth_dev->data->mac_addrs[i].addr_bytes,
1526 				   0, func_id);
1527 		if (rc && rc != HINIC_PF_SET_VF_ALREADY)
1528 			PMD_DRV_LOG(ERR, "Delete mac table failed, dev_name: %s",
1529 				    eth_dev->data->name);
1530 
1531 		memset(&eth_dev->data->mac_addrs[i], 0,
1532 		       sizeof(struct rte_ether_addr));
1533 	}
1534 
1535 	/* delete multicast mac addrs */
1536 	hinic_delete_mc_addr_list(nic_dev);
1537 }
1538 
1539 static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
1540 {
1541 	int ret = 0;
1542 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1543 
1544 	PMD_DRV_LOG(INFO, "Set port mtu, port_id: %d, mtu: %d, max_pkt_len: %d",
1545 			dev->data->port_id, mtu, HINIC_MTU_TO_PKTLEN(mtu));
1546 
1547 	if (mtu < HINIC_MIN_MTU_SIZE || mtu > HINIC_MAX_MTU_SIZE) {
1548 		PMD_DRV_LOG(ERR, "Invalid mtu: %d, must between %d and %d",
1549 				mtu, HINIC_MIN_MTU_SIZE, HINIC_MAX_MTU_SIZE);
1550 		return -EINVAL;
1551 	}
1552 
1553 	ret = hinic_set_port_mtu(nic_dev->hwdev, mtu);
1554 	if (ret) {
1555 		PMD_DRV_LOG(ERR, "Set port mtu failed, ret: %d", ret);
1556 		return ret;
1557 	}
1558 
1559 	/* update max frame size */
1560 	dev->data->dev_conf.rxmode.max_rx_pkt_len = HINIC_MTU_TO_PKTLEN(mtu);
1561 	nic_dev->mtu_size = mtu;
1562 
1563 	return ret;
1564 }
1565 
1566 static void hinic_store_vlan_filter(struct hinic_nic_dev *nic_dev,
1567 					u16 vlan_id, bool on)
1568 {
1569 	u32 vid_idx, vid_bit;
1570 
1571 	vid_idx = HINIC_VFTA_IDX(vlan_id);
1572 	vid_bit = HINIC_VFTA_BIT(vlan_id);
1573 
1574 	if (on)
1575 		nic_dev->vfta[vid_idx] |= vid_bit;
1576 	else
1577 		nic_dev->vfta[vid_idx] &= ~vid_bit;
1578 }
1579 
1580 static bool hinic_find_vlan_filter(struct hinic_nic_dev *nic_dev,
1581 				uint16_t vlan_id)
1582 {
1583 	u32 vid_idx, vid_bit;
1584 
1585 	vid_idx = HINIC_VFTA_IDX(vlan_id);
1586 	vid_bit = HINIC_VFTA_BIT(vlan_id);
1587 
1588 	return (nic_dev->vfta[vid_idx] & vid_bit) ? TRUE : FALSE;
1589 }
1590 
1591 /**
1592  * DPDK callback to set vlan filter.
1593  *
1594  * @param dev
1595  *   Pointer to Ethernet device structure.
1596  * @param vlan_id
1597  *   VLAN id to be added to or removed from the filter table.
1598  * @param enable
1599  *   Non-zero to add the VLAN id to the filter, zero to remove it.
1600  */
1601 static int hinic_vlan_filter_set(struct rte_eth_dev *dev,
1602 				uint16_t vlan_id, int enable)
1603 {
1604 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1605 	int err = 0;
1606 	u16 func_id;
1607 
1608 	if (vlan_id > RTE_ETHER_MAX_VLAN_ID)
1609 		return -EINVAL;
1610 
1611 	func_id = hinic_global_func_id(nic_dev->hwdev);
1612 
1613 	if (enable) {
1614 		/* If vlanid is already set, just return */
1615 		if (hinic_find_vlan_filter(nic_dev, vlan_id)) {
1616 			PMD_DRV_LOG(INFO, "Vlan %u has been added, device: %s",
1617 				  vlan_id, nic_dev->proc_dev_name);
1618 			return 0;
1619 		}
1620 
1621 		err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id,
1622 					    func_id, TRUE);
1623 	} else {
1624 		/* If vlanid can't be found, just return */
1625 		if (!hinic_find_vlan_filter(nic_dev, vlan_id)) {
1626 			PMD_DRV_LOG(INFO, "Vlan %u is not in the vlan filter list, device: %s",
1627 				  vlan_id, nic_dev->proc_dev_name);
1628 			return 0;
1629 		}
1630 
1631 		err = hinic_add_remove_vlan(nic_dev->hwdev, vlan_id,
1632 					    func_id, FALSE);
1633 	}
1634 
1635 	if (err) {
1636 		PMD_DRV_LOG(ERR, "%s vlan failed, func_id: %d, vlan_id: %d, err: %d",
1637 		      enable ? "Add" : "Remove", func_id, vlan_id, err);
1638 		return err;
1639 	}
1640 
1641 	hinic_store_vlan_filter(nic_dev, vlan_id, enable);
1642 
1643 	PMD_DRV_LOG(INFO, "%s vlan %u succeed, device: %s",
1644 		  enable ? "Add" : "Remove", vlan_id, nic_dev->proc_dev_name);
1645 	return 0;
1646 }
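
/*
 * From the application side this callback is reached through the generic
 * ethdev API, for example (port_id is a placeholder):
 *
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);	(add VLAN 100)
 *	rte_eth_dev_vlan_filter(port_id, 100, 0);	(remove it again)
 *
 * provided DEV_RX_OFFLOAD_VLAN_FILTER has been enabled on the port.
 */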
1647 
1648 /**
1649  * DPDK callback to enable or disable vlan offload.
1650  *
1651  * @param dev
1652  *   Pointer to Ethernet device structure.
1653  * @param mask
1654  *   Bitmask of ETH_VLAN_*_MASK flags selecting which settings to apply.
1655  */
1656 static int hinic_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1657 {
1658 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1659 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1660 	bool on;
1661 	int err;
1662 
1663 	/* Enable or disable VLAN filter */
1664 	if (mask & ETH_VLAN_FILTER_MASK) {
1665 		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ?
1666 			TRUE : FALSE;
1667 		err = hinic_config_vlan_filter(nic_dev->hwdev, on);
1668 		if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
1669 			PMD_DRV_LOG(WARNING,
1670 				"Current matching version does not support vlan filter configuration, device: %s, port_id: %d",
1671 				  nic_dev->proc_dev_name, dev->data->port_id);
1672 		} else if (err) {
1673 			PMD_DRV_LOG(ERR, "Failed to %s vlan filter, device: %s, port_id: %d, err: %d",
1674 				  on ? "enable" : "disable",
1675 				  nic_dev->proc_dev_name,
1676 				  dev->data->port_id, err);
1677 			return err;
1678 		}
1679 
1680 		PMD_DRV_LOG(INFO, "%s vlan filter succeed, device: %s, port_id: %d",
1681 			  on ? "Enable" : "Disable",
1682 			  nic_dev->proc_dev_name, dev->data->port_id);
1683 	}
1684 
1685 	/* Enable or disable VLAN stripping */
1686 	if (mask & ETH_VLAN_STRIP_MASK) {
1687 		on = (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) ?
1688 			TRUE : FALSE;
1689 		err = hinic_set_rx_vlan_offload(nic_dev->hwdev, on);
1690 		if (err) {
1691 			PMD_DRV_LOG(ERR, "Failed to %s vlan strip, device: %s, port_id: %d, err: %d",
1692 				  on ? "enable" : "disable",
1693 				  nic_dev->proc_dev_name,
1694 				  dev->data->port_id, err);
1695 			return err;
1696 		}
1697 
1698 		PMD_DRV_LOG(INFO, "%s vlan strip succeed, device: %s, port_id: %d",
1699 			  on ? "Enable" : "Disable",
1700 			  nic_dev->proc_dev_name, dev->data->port_id);
1701 	}
1702 
1703 	if (mask & ETH_VLAN_EXTEND_MASK) {
1704 		PMD_DRV_LOG(ERR, "Don't support vlan qinq, device: %s, port_id: %d",
1705 			  nic_dev->proc_dev_name, dev->data->port_id);
1706 		return -ENOTSUP;
1707 	}
1708 
1709 	return 0;
1710 }
1711 
1712 static void hinic_remove_all_vlanid(struct rte_eth_dev *eth_dev)
1713 {
1714 	struct hinic_nic_dev *nic_dev =
1715 		HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
1716 	u16 func_id;
1717 	int i;
1718 
1719 	func_id = hinic_global_func_id(nic_dev->hwdev);
1720 	for (i = 0; i <= RTE_ETHER_MAX_VLAN_ID; i++) {
1721 		/* if the vlan id is not in the filter table, skip it */
1722 		if (!hinic_find_vlan_filter(nic_dev, i))
1723 			continue;
1724 
1725 		(void)hinic_add_remove_vlan(nic_dev->hwdev, i, func_id, FALSE);
1726 		hinic_store_vlan_filter(nic_dev, i, false);
1727 	}
1728 }
1729 
1730 static int hinic_set_dev_allmulticast(struct hinic_nic_dev *nic_dev,
1731 				bool enable)
1732 {
1733 	u32 rx_mode_ctrl = nic_dev->rx_mode_status;
1734 
1735 	if (enable)
1736 		rx_mode_ctrl |= HINIC_RX_MODE_MC_ALL;
1737 	else
1738 		rx_mode_ctrl &= (~HINIC_RX_MODE_MC_ALL);
1739 
1740 	return hinic_config_rx_mode(nic_dev, rx_mode_ctrl);
1741 }
1742 
1743 /**
1744  * DPDK callback to enable allmulticast mode.
1745  *
1746  * @param dev
1747  *   Pointer to Ethernet device structure.
1748  *
1749  * @return
1750  *   0 on success,
1751  *   negative error value otherwise.
1752  */
1753 static int hinic_dev_allmulticast_enable(struct rte_eth_dev *dev)
1754 {
1755 	int ret = HINIC_OK;
1756 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1757 
1758 	ret = hinic_set_dev_allmulticast(nic_dev, true);
1759 	if (ret) {
1760 		PMD_DRV_LOG(ERR, "Enable allmulticast failed, error: %d", ret);
1761 		return ret;
1762 	}
1763 
1764 	PMD_DRV_LOG(INFO, "Enable allmulticast succeeded, nic_dev: %s, port_id: %d",
1765 		nic_dev->proc_dev_name, dev->data->port_id);
1766 	return 0;
1767 }
1768 
1769 /**
1770  * DPDK callback to disable allmulticast mode.
1771  *
1772  * @param dev
1773  *   Pointer to Ethernet device structure.
1774  *
1775  * @return
1776  *   0 on success,
1777  *   negative error value otherwise.
1778  */
1779 static int hinic_dev_allmulticast_disable(struct rte_eth_dev *dev)
1780 {
1781 	int ret = HINIC_OK;
1782 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1783 
1784 	ret = hinic_set_dev_allmulticast(nic_dev, false);
1785 	if (ret) {
1786 		PMD_DRV_LOG(ERR, "Disable allmulticast failed, error: %d", ret);
1787 		return ret;
1788 	}
1789 
1790 	PMD_DRV_LOG(INFO, "Disable allmulticast succeeded, nic_dev: %s, port_id: %d",
1791 		nic_dev->proc_dev_name, dev->data->port_id);
1792 	return 0;
1793 }
1794 
1795 /**
1796  * DPDK callback to enable promiscuous mode.
1797  *
1798  * @param dev
1799  *   Pointer to Ethernet device structure.
1800  *
1801  * @return
1802  *   0 on success,
1803  *   negative error value otherwise.
1804  */
1805 static int hinic_dev_promiscuous_enable(struct rte_eth_dev *dev)
1806 {
1807 	int rc = HINIC_OK;
1808 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1809 
1810 	PMD_DRV_LOG(INFO, "Enable promiscuous, nic_dev: %s, port_id: %d, promisc: %d",
1811 		    nic_dev->proc_dev_name, dev->data->port_id,
1812 		    dev->data->promiscuous);
1813 
1814 	rc = hinic_set_dev_promiscuous(nic_dev, true);
1815 	if (rc)
1816 		PMD_DRV_LOG(ERR, "Enable promiscuous failed");
1817 
1818 	return rc;
1819 }
1820 
1821 /**
1822  * DPDK callback to disable promiscuous mode.
1823  *
1824  * @param dev
1825  *   Pointer to Ethernet device structure.
1826  *
1827  * @return
1828  *   0 on success,
1829  *   negative error value otherwise.
1830  */
1831 static int hinic_dev_promiscuous_disable(struct rte_eth_dev *dev)
1832 {
1833 	int rc = HINIC_OK;
1834 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1835 
1836 	PMD_DRV_LOG(INFO, "Disable promiscuous, nic_dev: %s, port_id: %d, promisc: %d",
1837 		    nic_dev->proc_dev_name, dev->data->port_id,
1838 		    dev->data->promiscuous);
1839 
1840 	rc = hinic_set_dev_promiscuous(nic_dev, false);
1841 	if (rc)
1842 		PMD_DRV_LOG(ERR, "Disable promiscuous failed");
1843 
1844 	return rc;
1845 }
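/*
 * Usage sketch (application side, not part of this driver): the promiscuous
 * and allmulticast callbacks above are reached through the generic ethdev
 * API, e.g. on an assumed, already started "port_id":
 *
 *	rte_eth_promiscuous_enable(port_id);
 *	rte_eth_allmulticast_enable(port_id);
 */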
1846 
1847 /**
1848  * DPDK callback to update the RSS hash key and RSS hash type.
1849  *
1850  * @param dev
1851  *   Pointer to Ethernet device structure.
1852  * @param rss_conf
1853  *   RSS configuration data.
1854  *
1855  * @return
1856  *   0 on success, negative error value otherwise.
1857  */
1858 static int hinic_rss_hash_update(struct rte_eth_dev *dev,
1859 			  struct rte_eth_rss_conf *rss_conf)
1860 {
1861 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1862 	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
1863 	u8 hashkey[HINIC_RSS_KEY_SIZE] = {0};
1864 	u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
1865 	u64 rss_hf = rss_conf->rss_hf;
1866 	struct nic_rss_type rss_type = {0};
1867 	int err = 0;
1868 
1869 	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
1870 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
1871 		return HINIC_OK;
1872 	}
1873 
1874 	if (rss_conf->rss_key_len > HINIC_RSS_KEY_SIZE) {
1875 		PMD_DRV_LOG(ERR, "Invalid rss key, rss_key_len:%d",
1876 			    rss_conf->rss_key_len);
1877 		return HINIC_ERROR;
1878 	}
1879 
1880 	if (rss_conf->rss_key) {
1881 		memcpy(hashkey, rss_conf->rss_key, rss_conf->rss_key_len);
1882 		err = hinic_rss_set_template_tbl(nic_dev->hwdev, tmpl_idx,
1883 						 hashkey);
1884 		if (err) {
1885 			PMD_DRV_LOG(ERR, "Set rss template table failed");
1886 			goto disable_rss;
1887 		}
1888 	}
1889 
1890 	rss_type.ipv4 = (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4)) ? 1 : 0;
1891 	rss_type.tcp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) ? 1 : 0;
1892 	rss_type.ipv6 = (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6)) ? 1 : 0;
1893 	rss_type.ipv6_ext = (rss_hf & ETH_RSS_IPV6_EX) ? 1 : 0;
1894 	rss_type.tcp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 1 : 0;
1895 	rss_type.tcp_ipv6_ext = (rss_hf & ETH_RSS_IPV6_TCP_EX) ? 1 : 0;
1896 	rss_type.udp_ipv4 = (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) ? 1 : 0;
1897 	rss_type.udp_ipv6 = (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) ? 1 : 0;
1898 
1899 	err = hinic_set_rss_type(nic_dev->hwdev, tmpl_idx, rss_type);
1900 	if (err) {
1901 		PMD_DRV_LOG(ERR, "Set rss type table failed");
1902 		goto disable_rss;
1903 	}
1904 
1905 	return 0;
1906 
1907 disable_rss:
1908 	memset(prio_tc, 0, sizeof(prio_tc));
1909 	(void)hinic_rss_cfg(nic_dev->hwdev, 0, tmpl_idx, 0, prio_tc);
1910 	return err;
1911 }
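/*
 * Usage sketch (application side, not part of this driver): the callback
 * above is reached through rte_eth_dev_rss_hash_update(). A minimal example,
 * assuming an already configured "port_id" and keeping the current hash key:
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = NULL,
 *		.rss_key_len = 0,
 *		.rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP,
 *	};
 *	int ret = rte_eth_dev_rss_hash_update(port_id, &conf);
 */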
1912 
1913 /**
1914  * DPDK callback to get the RSS hash configuration.
1915  *
1916  * @param dev
1917  *   Pointer to Ethernet device structure.
1918  * @param rss_conf
1919  *   RSS configuration data.
1920  *
1921  * @return
1922  *   0 on success, negative error value otherwise.
1923  */
1924 static int hinic_rss_conf_get(struct rte_eth_dev *dev,
1925 		       struct rte_eth_rss_conf *rss_conf)
1926 {
1927 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1928 	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
1929 	u8 hashkey[HINIC_RSS_KEY_SIZE] = {0};
1930 	struct nic_rss_type rss_type = {0};
1931 	int err;
1932 
1933 	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG)) {
1934 		PMD_DRV_LOG(WARNING, "RSS is not enabled");
1935 		return HINIC_ERROR;
1936 	}
1937 
1938 	err = hinic_rss_get_template_tbl(nic_dev->hwdev, tmpl_idx, hashkey);
1939 	if (err)
1940 		return err;
1941 
1942 	if (rss_conf->rss_key &&
1943 	    rss_conf->rss_key_len >= HINIC_RSS_KEY_SIZE) {
1944 		memcpy(rss_conf->rss_key, hashkey, sizeof(hashkey));
1945 		rss_conf->rss_key_len = sizeof(hashkey);
1946 	}
1947 
1948 	err = hinic_get_rss_type(nic_dev->hwdev, tmpl_idx, &rss_type);
1949 	if (err)
1950 		return err;
1951 
1952 	rss_conf->rss_hf = 0;
1953 	rss_conf->rss_hf |=  rss_type.ipv4 ?
1954 		(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4) : 0;
1955 	rss_conf->rss_hf |=  rss_type.tcp_ipv4 ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
1956 	rss_conf->rss_hf |=  rss_type.ipv6 ?
1957 		(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6) : 0;
1958 	rss_conf->rss_hf |=  rss_type.ipv6_ext ? ETH_RSS_IPV6_EX : 0;
1959 	rss_conf->rss_hf |=  rss_type.tcp_ipv6 ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
1960 	rss_conf->rss_hf |=  rss_type.tcp_ipv6_ext ? ETH_RSS_IPV6_TCP_EX : 0;
1961 	rss_conf->rss_hf |=  rss_type.udp_ipv4 ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
1962 	rss_conf->rss_hf |=  rss_type.udp_ipv6 ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
1963 
1964 	return HINIC_OK;
1965 }
1966 
1967 /**
1968  * DPDK callback to update the RETA indirection table.
1969  *
1970  * @param dev
1971  *   Pointer to Ethernet device structure.
1972  * @param reta_conf
1973  *   Pointer to RETA configuration structure array.
1974  * @param reta_size
1975  *   Size of the RETA table.
1976  *
1977  * @return
1978  *   0 on success, negative error value otherwise.
1979  */
1980 static int hinic_rss_indirtbl_update(struct rte_eth_dev *dev,
1981 			      struct rte_eth_rss_reta_entry64 *reta_conf,
1982 			      uint16_t reta_size)
1983 {
1984 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1985 	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
1986 	u8 prio_tc[HINIC_DCB_UP_MAX] = {0};
1987 	u32 indirtbl[NIC_RSS_INDIR_SIZE] = {0};
1988 	int err = 0;
1989 	u16 i = 0;
1990 	u16 idx, shift;
1991 
1992 	if (!(nic_dev->flags & ETH_MQ_RX_RSS_FLAG))
1993 		return HINIC_OK;
1994 
1995 	if (reta_size != NIC_RSS_INDIR_SIZE) {
1996 		PMD_DRV_LOG(ERR, "Invalid reta size, reta_size:%d", reta_size);
1997 		return HINIC_ERROR;
1998 	}
1999 
2000 	err = hinic_rss_get_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl);
2001 	if (err)
2002 		return err;
2003 
2004 	/* update rss indir_tbl */
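	/*
	 * Each rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE (64)
	 * indirection entries: "idx" selects the 64-entry group, "shift"
	 * selects the entry inside that group, and the group's mask bit
	 * decides whether that entry is actually updated.
	 */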
2005 	for (i = 0; i < reta_size; i++) {
2006 		idx = i / RTE_RETA_GROUP_SIZE;
2007 		shift = i % RTE_RETA_GROUP_SIZE;
2008 		if (reta_conf[idx].mask & (1ULL << shift))
2009 			indirtbl[i] = reta_conf[idx].reta[shift];
2010 	}
2011 
2012 	for (i = 0; i < reta_size; i++) {
2013 		if (indirtbl[i] >= nic_dev->num_rq) {
2014 			PMD_DRV_LOG(ERR, "Invalid reta entry, index:%d, num_rq:%d",
2015 				    i, nic_dev->num_rq);
2016 			goto disable_rss;
2017 		}
2018 	}
2019 
2020 	err = hinic_rss_set_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl);
2021 	if (err)
2022 		goto disable_rss;
2023 
2024 	nic_dev->rss_indir_flag = true;
2025 
2026 	return 0;
2027 
2028 disable_rss:
2029 	memset(prio_tc, 0, sizeof(prio_tc));
2030 	(void)hinic_rss_cfg(nic_dev->hwdev, 0, tmpl_idx, 0, prio_tc);
2031 
2032 	return HINIC_ERROR;
2033 }
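/*
 * Usage sketch (application side, not part of this driver): the callback
 * above is reached through rte_eth_dev_rss_reta_update(); reta_size must
 * match the device RETA size (NIC_RSS_INDIR_SIZE here). A rough example,
 * assuming "port_id", "reta_size" and "nb_rxq" come from the application:
 *
 *	struct rte_eth_rss_reta_entry64 reta[reta_size / RTE_RETA_GROUP_SIZE];
 *	unsigned int i;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < reta_size; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_rxq;
 *	}
 *	ret = rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
 */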
2034 
2036 /**
2037  * DPDK callback to get the RETA indirection table.
2038  *
2039  * @param dev
2040  *   Pointer to Ethernet device structure.
2041  * @param reta_conf
2042  *   Pointer to RETA configuration structure array.
2043  * @param reta_size
2044  *   Size of the RETA table.
2045  *
2046  * @return
2047  *   0 on success, negative error value otherwise.
2048  */
2049 static int hinic_rss_indirtbl_query(struct rte_eth_dev *dev,
2050 			     struct rte_eth_rss_reta_entry64 *reta_conf,
2051 			     uint16_t reta_size)
2052 {
2053 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2054 	u8 tmpl_idx = nic_dev->rss_tmpl_idx;
2055 	int err = 0;
2056 	u32 indirtbl[NIC_RSS_INDIR_SIZE] = {0};
2057 	u16 idx, shift;
2058 	u16 i = 0;
2059 
2060 	if (reta_size != NIC_RSS_INDIR_SIZE) {
2061 		PMD_DRV_LOG(ERR, "Invalid reta size, reta_size:%d", reta_size);
2062 		return HINIC_ERROR;
2063 	}
2064 
2065 	err = hinic_rss_get_indir_tbl(nic_dev->hwdev, tmpl_idx, indirtbl);
2066 	if (err) {
2067 		PMD_DRV_LOG(ERR, "Get rss indirect table failed, error:%d",
2068 			    err);
2069 		return err;
2070 	}
2071 
2072 	for (i = 0; i < reta_size; i++) {
2073 		idx = i / RTE_RETA_GROUP_SIZE;
2074 		shift = i % RTE_RETA_GROUP_SIZE;
2075 		if (reta_conf[idx].mask & (1ULL << shift))
2076 			reta_conf[idx].reta[shift] = (uint16_t)indirtbl[i];
2077 	}
2078 
2079 	return HINIC_OK;
2080 }
2081 
2082 /**
2083  * DPDK callback to get extended device statistics.
2084  *
2085  * @param dev
2086  *   Pointer to Ethernet device.
2087  * @param xstats
2088  *   Pointer to rte extended stats table.
2089  * @param n
2090  *   The size of the stats table.
2091  *
2092  * @return
2093  *   Number of extended stats on success (the stats array is filled in),
2094  *   negative error value otherwise.
2095  */
2096 static int hinic_dev_xstats_get(struct rte_eth_dev *dev,
2097 			 struct rte_eth_xstat *xstats,
2098 			 unsigned int n)
2099 {
2100 	u16 qid = 0;
2101 	u32 i;
2102 	int err, count;
2103 	struct hinic_nic_dev *nic_dev;
2104 	struct hinic_phy_port_stats port_stats;
2105 	struct hinic_vport_stats vport_stats;
2106 	struct hinic_rxq	*rxq = NULL;
2107 	struct hinic_rxq_stats rxq_stats;
2108 	struct hinic_txq	*txq = NULL;
2109 	struct hinic_txq_stats txq_stats;
2110 
2111 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2112 	count = hinic_xstats_calc_num(nic_dev);
2113 	if ((int)n < count)
2114 		return count;
2115 
2116 	count = 0;
2117 
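	/*
	 * The fill order below must match hinic_dev_xstats_get_names():
	 * per-RxQ PMD stats, per-TxQ PMD stats, vport stats and finally
	 * (PF only) physical port stats.
	 */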
2118 	/* Get stats from hinic_rxq_stats */
2119 	for (qid = 0; qid < nic_dev->num_rq; qid++) {
2120 		rxq = nic_dev->rxqs[qid];
2121 		hinic_rxq_get_stats(rxq, &rxq_stats);
2122 
2123 		for (i = 0; i < HINIC_RXQ_XSTATS_NUM; i++) {
2124 			xstats[count].value =
2125 				*(uint64_t *)(((char *)&rxq_stats) +
2126 				hinic_rxq_stats_strings[i].offset);
2127 			xstats[count].id = count;
2128 			count++;
2129 		}
2130 	}
2131 
2132 	/* Get stats from hinic_txq_stats */
2133 	for (qid = 0; qid < nic_dev->num_sq; qid++) {
2134 		txq = nic_dev->txqs[qid];
2135 		hinic_txq_get_stats(txq, &txq_stats);
2136 
2137 		for (i = 0; i < HINIC_TXQ_XSTATS_NUM; i++) {
2138 			xstats[count].value =
2139 				*(uint64_t *)(((char *)&txq_stats) +
2140 				hinic_txq_stats_strings[i].offset);
2141 			xstats[count].id = count;
2142 			count++;
2143 		}
2144 	}
2145 
2146 	/* Get stats from hinic_vport_stats */
2147 	err = hinic_get_vport_stats(nic_dev->hwdev, &vport_stats);
2148 	if (err)
2149 		return err;
2150 
2151 	for (i = 0; i < HINIC_VPORT_XSTATS_NUM; i++) {
2152 		xstats[count].value =
2153 			*(uint64_t *)(((char *)&vport_stats) +
2154 			hinic_vport_stats_strings[i].offset);
2155 		xstats[count].id = count;
2156 		count++;
2157 	}
2158 
2159 	if (HINIC_IS_VF(nic_dev->hwdev))
2160 		return count;
2161 
2162 	/* Get stats from hinic_phy_port_stats */
2163 	err = hinic_get_phy_port_stats(nic_dev->hwdev, &port_stats);
2164 	if (err)
2165 		return err;
2166 
2167 	for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) {
2168 		xstats[count].value = *(uint64_t *)(((char *)&port_stats) +
2169 				hinic_phyport_stats_strings[i].offset);
2170 		xstats[count].id = count;
2171 		count++;
2172 	}
2173 
2174 	return count;
2175 }
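/*
 * Usage sketch (application side, not part of this driver): extended stats
 * are usually read with the two-call pattern, assuming "port_id" is valid:
 *
 *	int nb = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(nb * sizeof(*xs));
 *	if (xs != NULL)
 *		rte_eth_xstats_get(port_id, xs, nb);
 */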
2176 
2177 static void hinic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2178 				struct rte_eth_rxq_info *qinfo)
2179 {
2180 	struct hinic_rxq  *rxq = dev->data->rx_queues[queue_id];
2181 
2182 	qinfo->mp = rxq->mb_pool;
2183 	qinfo->nb_desc = rxq->q_depth;
2184 }
2185 
2186 static void hinic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2187 				struct rte_eth_txq_info *qinfo)
2188 {
2189 	struct hinic_txq  *txq = dev->data->tx_queues[queue_id];
2190 
2191 	qinfo->nb_desc = txq->q_depth;
2192 }
2193 
2194 /**
2195  * DPDK callback to retrieve names of extended device statistics.
2196  *
2197  * @param dev
2198  *   Pointer to Ethernet device structure.
2199  * @param xstats_names
2200  *   Buffer to insert names into.
2201  *
2202  * @return
2203  *   Number of xstats names.
2204  */
2205 static int hinic_dev_xstats_get_names(struct rte_eth_dev *dev,
2206 			       struct rte_eth_xstat_name *xstats_names,
2207 			       __rte_unused unsigned int limit)
2208 {
2209 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2210 	int count = 0;
2211 	u16 i = 0, q_num;
2212 
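	/* A NULL name array means the caller only asks for the xstats count */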
2213 	if (xstats_names == NULL)
2214 		return hinic_xstats_calc_num(nic_dev);
2215 
2216 	/* get pmd rxq stats */
2217 	for (q_num = 0; q_num < nic_dev->num_rq; q_num++) {
2218 		for (i = 0; i < HINIC_RXQ_XSTATS_NUM; i++) {
2219 			snprintf(xstats_names[count].name,
2220 				 sizeof(xstats_names[count].name),
2221 				 "rxq%d_%s_pmd",
2222 				 q_num, hinic_rxq_stats_strings[i].name);
2223 			count++;
2224 		}
2225 	}
2226 
2227 	/* get pmd txq stats */
2228 	for (q_num = 0; q_num < nic_dev->num_sq; q_num++) {
2229 		for (i = 0; i < HINIC_TXQ_XSTATS_NUM; i++) {
2230 			snprintf(xstats_names[count].name,
2231 				 sizeof(xstats_names[count].name),
2232 				 "txq%d_%s_pmd",
2233 				 q_num, hinic_txq_stats_strings[i].name);
2234 			count++;
2235 		}
2236 	}
2237 
2238 	/* get vport stats */
2239 	for (i = 0; i < HINIC_VPORT_XSTATS_NUM; i++) {
2240 		snprintf(xstats_names[count].name,
2241 			 sizeof(xstats_names[count].name),
2242 			 "%s",
2243 			 hinic_vport_stats_strings[i].name);
2244 		count++;
2245 	}
2246 
2247 	if (HINIC_IS_VF(nic_dev->hwdev))
2248 		return count;
2249 
2250 	/* get phy port stats */
2251 	for (i = 0; i < HINIC_PHYPORT_XSTATS_NUM; i++) {
2252 		snprintf(xstats_names[count].name,
2253 			 sizeof(xstats_names[count].name),
2254 			 "%s",
2255 			 hinic_phyport_stats_strings[i].name);
2256 		count++;
2257 	}
2258 
2259 	return count;
2260 }

2261 /**
2262  * DPDK callback to set the default MAC address.
2263  *
2264  * @param dev
2265  *   Pointer to Ethernet device structure.
2266  * @param addr
2267  *   Pointer to the new MAC address.
2268  * @return
2269  *   0 on success, negative error value otherwise.
2270  */
2271 static int hinic_set_mac_addr(struct rte_eth_dev *dev,
2272 			      struct rte_ether_addr *addr)
2273 {
2274 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2275 	u16 func_id;
2276 	int err;
2277 
2278 	func_id = hinic_global_func_id(nic_dev->hwdev);
2279 	err = hinic_update_mac(nic_dev->hwdev, nic_dev->default_addr.addr_bytes,
2280 			       addr->addr_bytes, 0, func_id);
2281 	if (err)
2282 		return err;
2283 
2284 	rte_ether_addr_copy(addr, &nic_dev->default_addr);
2285 
2286 	PMD_DRV_LOG(INFO, "Set new MAC address %02x:%02x:%02x:%02x:%02x:%02x",
2287 		    addr->addr_bytes[0], addr->addr_bytes[1],
2288 		    addr->addr_bytes[2], addr->addr_bytes[3],
2289 		    addr->addr_bytes[4], addr->addr_bytes[5]);
2290 
2291 	return 0;
2292 }
2293 
2294 /**
2295  * DPDK callback to remove a MAC address.
2296  *
2297  * @param dev
2298  *   Pointer to Ethernet device structure.
2299  * @param index
2300  *   MAC address index.
2301  */
2302 static void hinic_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
2303 {
2304 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2305 	u16 func_id;
2306 	int ret;
2307 
2308 	if (index >= HINIC_MAX_UC_MAC_ADDRS) {
2309 		PMD_DRV_LOG(INFO, "MAC address index (%u) to remove is out of range",
2310 			    index);
2311 		return;
2312 	}
2313 
2314 	func_id = hinic_global_func_id(nic_dev->hwdev);
2315 	ret = hinic_del_mac(nic_dev->hwdev,
2316 			    dev->data->mac_addrs[index].addr_bytes, 0, func_id);
2317 	if (ret)
2318 		return;
2319 
2320 	memset(&dev->data->mac_addrs[index], 0, sizeof(struct rte_ether_addr));
2321 }
2322 
2323 /**
2324  * DPDK callback to add a MAC address.
2325  *
2326  * @param dev
2327  *   Pointer to Ethernet device structure.
2328  * @param mac_addr
2329  *   MAC address to register.
2330  * @param index
2331  *   MAC address index.
2332  * @param vmdq
2333  *   VMDq pool index to associate address with (ignored).
2334  *
2335  * @return
2336  *   0 on success, a negative errno value otherwise and rte_errno is set.
2337  */
2339 static int hinic_mac_addr_add(struct rte_eth_dev *dev,
2340 			      struct rte_ether_addr *mac_addr, uint32_t index,
2341 			      __rte_unused uint32_t vmdq)
2342 {
2343 	struct hinic_nic_dev  *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2344 	unsigned int i;
2345 	u16 func_id;
2346 	int ret;
2347 
2348 	if (index >= HINIC_MAX_UC_MAC_ADDRS) {
2349 		PMD_DRV_LOG(INFO, "MAC address index (%u) to add is out of range", index);
2350 		return -EINVAL;
2351 	}
2352 
2353 	/* First, make sure this address isn't already configured. */
2354 	for (i = 0; (i != HINIC_MAX_UC_MAC_ADDRS); ++i) {
2355 		/* Skip this index; it is about to be reconfigured. */
2356 		if (i == index)
2357 			continue;
2358 
2359 		if (memcmp(&dev->data->mac_addrs[i],
2360 			mac_addr, sizeof(*mac_addr)))
2361 			continue;
2362 
2363 		PMD_DRV_LOG(INFO, "MAC address already configured");
2364 		return -EADDRINUSE;
2365 	}
2366 
2367 	func_id = hinic_global_func_id(nic_dev->hwdev);
2368 	ret = hinic_set_mac(nic_dev->hwdev, mac_addr->addr_bytes, 0, func_id);
2369 	if (ret)
2370 		return ret;
2371 
2372 	dev->data->mac_addrs[index] = *mac_addr;
2373 	return 0;
2374 }
2375 
2376 /**
2377  * DPDK callback to set the list of multicast MAC addresses.
2378  *
2379  * @param dev
2380  *   Pointer to Ethernet device structure.
2381  * @param mc_addr_set
2382  *   Pointer to the array of multicast MAC addresses to set.
2383  * @param nb_mc_addr
2384  *   Number of entries in the multicast address array.
2385  * @return
2386  *   0 on success, negative error value otherwise.
2387  */
2388 static int hinic_set_mc_addr_list(struct rte_eth_dev *dev,
2389 				  struct rte_ether_addr *mc_addr_set,
2390 				  uint32_t nb_mc_addr)
2391 {
2392 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2393 	u16 func_id;
2394 	int ret;
2395 	u32 i;
2396 
2397 	func_id = hinic_global_func_id(nic_dev->hwdev);
2398 
2399 	/* delete the old multicast addresses first */
2400 	hinic_delete_mc_addr_list(nic_dev);
2401 
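	/*
	 * If the requested list does not fit into the hardware filter
	 * (HINIC_MAX_MC_MAC_ADDRS entries), fall back to allmulticast mode so
	 * that no multicast group is silently dropped.
	 */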
2402 	if (nb_mc_addr > HINIC_MAX_MC_MAC_ADDRS)
2403 		goto allmulti;
2404 
2405 	for (i = 0; i < nb_mc_addr; i++) {
2406 		ret = hinic_set_mac(nic_dev->hwdev, mc_addr_set[i].addr_bytes,
2407 				    0, func_id);
2408 		/* if adding a multicast address fails, fall back to allmulticast */
2409 		if (ret) {
2410 			hinic_delete_mc_addr_list(nic_dev);
2411 			goto allmulti;
2412 		}
2413 
2414 		rte_ether_addr_copy(&mc_addr_set[i], &nic_dev->mc_list[i]);
2415 	}
2416 
2417 	return 0;
2418 
2419 allmulti:
2420 	hinic_dev_allmulticast_enable(dev);
2421 
2422 	return 0;
2423 }
2424 
2425 /**
2426  * DPDK callback to manage filter operations
2427  *
2428  * @param dev
2429  *   Pointer to Ethernet device structure.
2430  * @param filter_type
2431  *   Filter type.
2432  * @param filter_op
2433  *   Operation to perform.
2434  * @param arg
2435  *   Pointer to operation-specific structure.
2436  *
2437  * @return
2438  *   0 on success, negative errno value on failure.
2439  */
2440 static int hinic_dev_filter_ctrl(struct rte_eth_dev *dev,
2441 		     enum rte_filter_type filter_type,
2442 		     enum rte_filter_op filter_op,
2443 		     void *arg)
2444 {
2445 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2446 	int func_id = hinic_global_func_id(nic_dev->hwdev);
2447 
2448 	switch (filter_type) {
2449 	case RTE_ETH_FILTER_GENERIC:
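		/*
		 * Generic filter: hand back the rte_flow ops table so that
		 * flow rules are managed through the rte_flow API.
		 */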
2450 		if (filter_op != RTE_ETH_FILTER_GET)
2451 			return -EINVAL;
2452 		*(const void **)arg = &hinic_flow_ops;
2453 		break;
2454 	default:
2455 		PMD_DRV_LOG(INFO, "Filter type (%d) not supported",
2456 			filter_type);
2457 		return -EINVAL;
2458 	}
2459 
2460 	PMD_DRV_LOG(INFO, "Set filter_ctrl succeeded, func_id: 0x%x, filter_type: 0x%x,"
2461 			" filter_op: 0x%x.", func_id, filter_type, filter_op);
2462 	return 0;
2463 }
2464 
2465 static int hinic_set_default_pause_feature(struct hinic_nic_dev *nic_dev)
2466 {
2467 	struct nic_pause_config pause_config = {0};
2468 
2469 	pause_config.auto_neg = 0;
2470 	pause_config.rx_pause = HINIC_DEFAUT_PAUSE_CONFIG;
2471 	pause_config.tx_pause = HINIC_DEFAUT_PAUSE_CONFIG;
2472 
2473 	return hinic_set_pause_config(nic_dev->hwdev, pause_config);
2474 }
2475 
2476 static int hinic_set_default_dcb_feature(struct hinic_nic_dev *nic_dev)
2477 {
2478 	u8 up_tc[HINIC_DCB_UP_MAX] = {0};
2479 	u8 up_pgid[HINIC_DCB_UP_MAX] = {0};
2480 	u8 up_bw[HINIC_DCB_UP_MAX] = {0};
2481 	u8 pg_bw[HINIC_DCB_UP_MAX] = {0};
2482 	u8 up_strict[HINIC_DCB_UP_MAX] = {0};
2483 	int i = 0;
2484 
2485 	pg_bw[0] = 100;
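	/*
	 * Default ETS state: every UP maps to TC0/PG0, PG0 owns 100% of the
	 * bandwidth, each UP weight is 100 and strict priority is disabled.
	 */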
2486 	for (i = 0; i < HINIC_DCB_UP_MAX; i++)
2487 		up_bw[i] = 100;
2488 
2489 	return hinic_dcb_set_ets(nic_dev->hwdev, up_tc, pg_bw,
2490 					up_pgid, up_bw, up_strict);
2491 }
2492 
2493 static int hinic_init_default_cos(struct hinic_nic_dev *nic_dev)
2494 {
2495 	u8 cos_id = 0;
2496 	int err;
2497 
2498 	if (!HINIC_IS_VF(nic_dev->hwdev)) {
2499 		nic_dev->default_cos =
2500 				(hinic_global_func_id(nic_dev->hwdev) +
2501 						DEFAULT_BASE_COS) % NR_MAX_COS;
2502 	} else {
2503 		err = hinic_vf_get_default_cos(nic_dev->hwdev, &cos_id);
2504 		if (err) {
2505 			PMD_DRV_LOG(ERR, "Get VF default cos failed, err: %d",
2506 					err);
2507 			return HINIC_ERROR;
2508 		}
2509 
2510 		nic_dev->default_cos = cos_id;
2511 	}
2512 
2513 	return 0;
2514 }
2515 
2516 static int hinic_set_default_hw_feature(struct hinic_nic_dev *nic_dev)
2517 {
2518 	int err;
2519 
2520 	err = hinic_init_default_cos(nic_dev);
2521 	if (err)
2522 		return err;
2523 
2524 	if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
2525 		return 0;
2526 
2527 	/* Restore DCB configuration to its default state */
2528 	err = hinic_set_default_dcb_feature(nic_dev);
2529 	if (err)
2530 		return err;
2531 
2532 	/* Enable global pause; per-UP PFC is disabled. */
2533 	err = hinic_set_default_pause_feature(nic_dev);
2534 	if (err)
2535 		return err;
2536 
2537 	err = hinic_reset_port_link_cfg(nic_dev->hwdev);
2538 	if (err)
2539 		return err;
2540 
2541 	err = hinic_set_link_status_follow(nic_dev->hwdev,
2542 					   HINIC_LINK_FOLLOW_PORT);
2543 	if (err == HINIC_MGMT_CMD_UNSUPPORTED)
2544 		PMD_DRV_LOG(WARNING, "Setting link status to follow the phy port status is not supported");
2545 	else if (err)
2546 		return err;
2547 
2548 	return hinic_set_anti_attack(nic_dev->hwdev, true);
2549 }
2550 
2551 static int32_t hinic_card_workmode_check(struct hinic_nic_dev *nic_dev)
2552 {
2553 	struct hinic_board_info info = { 0 };
2554 	int rc;
2555 
2556 	if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
2557 		return 0;
2558 
2559 	rc = hinic_get_board_info(nic_dev->hwdev, &info);
2560 	if (rc)
2561 		return rc;
2562 
2563 	return (info.service_mode == HINIC_SERVICE_MODE_NIC ? HINIC_OK :
2564 						HINIC_ERROR);
2565 }
2566 
2567 static int hinic_copy_mempool_init(struct hinic_nic_dev *nic_dev)
2568 {
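	/* Reuse an existing copy mempool with this device name, if any */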
2569 	nic_dev->cpy_mpool = rte_mempool_lookup(nic_dev->proc_dev_name);
2570 	if (nic_dev->cpy_mpool == NULL) {
2571 		nic_dev->cpy_mpool =
2572 		rte_pktmbuf_pool_create(nic_dev->proc_dev_name,
2573 					HINIC_COPY_MEMPOOL_DEPTH,
2574 					0, 0,
2575 					HINIC_COPY_MBUF_SIZE,
2576 					rte_socket_id());
2577 		if (!nic_dev->cpy_mpool) {
2578 			PMD_DRV_LOG(ERR, "Create copy mempool failed, errno: %d, dev_name: %s",
2579 				    rte_errno, nic_dev->proc_dev_name);
2580 			return -ENOMEM;
2581 		}
2582 	}
2583 
2584 	return 0;
2585 }
2586 
2587 static void hinic_copy_mempool_uninit(struct hinic_nic_dev *nic_dev)
2588 {
2589 	if (nic_dev->cpy_mpool != NULL)
2590 		rte_mempool_free(nic_dev->cpy_mpool);
2591 }
2592 
2593 static int hinic_init_sw_rxtxqs(struct hinic_nic_dev *nic_dev)
2594 {
2595 	u32 txq_size;
2596 	u32 rxq_size;
2597 
2598 	/* allocate software txq array */
2599 	txq_size = nic_dev->nic_cap.max_sqs * sizeof(*nic_dev->txqs);
2600 	nic_dev->txqs = kzalloc_aligned(txq_size, GFP_KERNEL);
2601 	if (!nic_dev->txqs) {
2602 		PMD_DRV_LOG(ERR, "Allocate txqs failed");
2603 		return -ENOMEM;
2604 	}
2605 
2606 	/* allocate software rxq array */
2607 	rxq_size = nic_dev->nic_cap.max_rqs * sizeof(*nic_dev->rxqs);
2608 	nic_dev->rxqs = kzalloc_aligned(rxq_size, GFP_KERNEL);
2609 	if (!nic_dev->rxqs) {
2610 		/* free txqs */
2611 		kfree(nic_dev->txqs);
2612 		nic_dev->txqs = NULL;
2613 
2614 		PMD_DRV_LOG(ERR, "Allocate rxqs failed");
2615 		return -ENOMEM;
2616 	}
2617 
2618 	return HINIC_OK;
2619 }
2620 
2621 static void hinic_deinit_sw_rxtxqs(struct hinic_nic_dev *nic_dev)
2622 {
2623 	kfree(nic_dev->txqs);
2624 	nic_dev->txqs = NULL;
2625 
2626 	kfree(nic_dev->rxqs);
2627 	nic_dev->rxqs = NULL;
2628 }
2629 
2630 static int hinic_nic_dev_create(struct rte_eth_dev *eth_dev)
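/*
 * Bring-up order: osdep -> hwif -> cfg_mgmt -> aeqs -> pf_to_mgmt -> mailbox
 * -> workmode check -> l2nic reset -> attr table -> cmdqs -> hwdev state ->
 * capability -> nic_io -> sw queues -> copy mempool -> default hw features.
 * The error labels at the end unwind the completed steps in reverse order.
 */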
2631 {
2632 	struct hinic_nic_dev *nic_dev =
2633 				HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
2634 	int rc;
2635 
2636 	nic_dev->hwdev = rte_zmalloc("hinic_hwdev", sizeof(*nic_dev->hwdev),
2637 				     RTE_CACHE_LINE_SIZE);
2638 	if (!nic_dev->hwdev) {
2639 		PMD_DRV_LOG(ERR, "Allocate hinic hwdev memory failed, dev_name: %s",
2640 			    eth_dev->data->name);
2641 		return -ENOMEM;
2642 	}
2643 	nic_dev->hwdev->pcidev_hdl = RTE_ETH_DEV_TO_PCI(eth_dev);
2644 
2645 	/* init osdep */
2646 	rc = hinic_osdep_init(nic_dev->hwdev);
2647 	if (rc) {
2648 		PMD_DRV_LOG(ERR, "Initialize os_dep failed, dev_name: %s",
2649 			    eth_dev->data->name);
2650 		goto init_osdep_fail;
2651 	}
2652 
2653 	/* init_hwif */
2654 	rc = hinic_hwif_res_init(nic_dev->hwdev);
2655 	if (rc) {
2656 		PMD_DRV_LOG(ERR, "Initialize hwif failed, dev_name: %s",
2657 			    eth_dev->data->name);
2658 		goto init_hwif_fail;
2659 	}
2660 
2661 	/* init_cfg_mgmt */
2662 	rc = init_cfg_mgmt(nic_dev->hwdev);
2663 	if (rc) {
2664 		PMD_DRV_LOG(ERR, "Initialize cfg_mgmt failed, dev_name: %s",
2665 			    eth_dev->data->name);
2666 		goto init_cfgmgnt_fail;
2667 	}
2668 
2669 	/* init_aeqs */
2670 	rc = hinic_comm_aeqs_init(nic_dev->hwdev);
2671 	if (rc) {
2672 		PMD_DRV_LOG(ERR, "Initialize aeqs failed, dev_name: %s",
2673 			    eth_dev->data->name);
2674 		goto init_aeqs_fail;
2675 	}
2676 
2677 	/* init_pf_to_mgmt */
2678 	rc = hinic_comm_pf_to_mgmt_init(nic_dev->hwdev);
2679 	if (rc) {
2680 		PMD_DRV_LOG(ERR, "Initialize pf_to_mgmt failed, dev_name: %s",
2681 			    eth_dev->data->name);
2682 		goto init_pf_to_mgmt_fail;
2683 	}
2684 
2685 	/* init mailbox */
2686 	rc = hinic_comm_func_to_func_init(nic_dev->hwdev);
2687 	if (rc) {
2688 		PMD_DRV_LOG(ERR, "Initialize func_to_func failed, dev_name: %s",
2689 			    eth_dev->data->name);
2690 		goto init_func_to_func_fail;
2691 	}
2692 
2693 	rc = hinic_card_workmode_check(nic_dev);
2694 	if (rc) {
2695 		PMD_DRV_LOG(ERR, "Check card workmode failed, dev_name: %s",
2696 			    eth_dev->data->name);
2697 		goto workmode_check_fail;
2698 	}
2699 
2700 	/* do l2nic reset to bring the chip to a clean state */
2701 	rc = hinic_l2nic_reset(nic_dev->hwdev);
2702 	if (rc) {
2703 		PMD_DRV_LOG(ERR, "Do l2nic reset failed, dev_name: %s",
2704 			    eth_dev->data->name);
2705 		goto l2nic_reset_fail;
2706 	}
2707 
2708 	/* init dma and aeq msix attribute table */
2709 	(void)hinic_init_attr_table(nic_dev->hwdev);
2710 
2711 	/* init_cmdqs */
2712 	rc = hinic_comm_cmdqs_init(nic_dev->hwdev);
2713 	if (rc) {
2714 		PMD_DRV_LOG(ERR, "Initialize cmdq failed, dev_name: %s",
2715 			    eth_dev->data->name);
2716 		goto init_cmdq_fail;
2717 	}
2718 
2719 	/* set hardware state active */
2720 	rc = hinic_activate_hwdev_state(nic_dev->hwdev);
2721 	if (rc) {
2722 		PMD_DRV_LOG(ERR, "Initialize resources state failed, dev_name: %s",
2723 			    eth_dev->data->name);
2724 		goto init_resources_state_fail;
2725 	}
2726 
2727 	/* init_capability */
2728 	rc = hinic_init_capability(nic_dev->hwdev);
2729 	if (rc) {
2730 		PMD_DRV_LOG(ERR, "Initialize capability failed, dev_name: %s",
2731 			    eth_dev->data->name);
2732 		goto init_cap_fail;
2733 	}
2734 
2735 	/* get nic capability */
2736 	if (!hinic_support_nic(nic_dev->hwdev, &nic_dev->nic_cap))
2737 		goto nic_check_fail;
2738 
2739 	/* init root cla and function table */
2740 	rc = hinic_init_nicio(nic_dev->hwdev);
2741 	if (rc) {
2742 		PMD_DRV_LOG(ERR, "Initialize nic_io failed, dev_name: %s",
2743 			    eth_dev->data->name);
2744 		goto init_nicio_fail;
2745 	}
2746 
2747 	/* init_software_txrxq */
2748 	rc = hinic_init_sw_rxtxqs(nic_dev);
2749 	if (rc) {
2750 		PMD_DRV_LOG(ERR, "Initialize sw_rxtxqs failed, dev_name: %s",
2751 			    eth_dev->data->name);
2752 		goto init_sw_rxtxqs_fail;
2753 	}
2754 
2755 	rc = hinic_copy_mempool_init(nic_dev);
2756 	if (rc) {
2757 		PMD_DRV_LOG(ERR, "Create copy mempool failed, dev_name: %s",
2758 			 eth_dev->data->name);
2759 		goto init_mpool_fail;
2760 	}
2761 
2762 	/* set hardware feature to default status */
2763 	rc = hinic_set_default_hw_feature(nic_dev);
2764 	if (rc) {
2765 		PMD_DRV_LOG(ERR, "Initialize hardware default features failed, dev_name: %s",
2766 			    eth_dev->data->name);
2767 		goto set_default_hw_feature_fail;
2768 	}
2769 
2770 	return 0;
2771 
2772 set_default_hw_feature_fail:
2773 	hinic_copy_mempool_uninit(nic_dev);
2774 
2775 init_mpool_fail:
2776 	hinic_deinit_sw_rxtxqs(nic_dev);
2777 
2778 init_sw_rxtxqs_fail:
2779 	hinic_deinit_nicio(nic_dev->hwdev);
2780 
2781 nic_check_fail:
2782 init_nicio_fail:
2783 init_cap_fail:
2784 	hinic_deactivate_hwdev_state(nic_dev->hwdev);
2785 
2786 init_resources_state_fail:
2787 	hinic_comm_cmdqs_free(nic_dev->hwdev);
2788 
2789 init_cmdq_fail:
2790 l2nic_reset_fail:
2791 workmode_check_fail:
2792 	hinic_comm_func_to_func_free(nic_dev->hwdev);
2793 
2794 init_func_to_func_fail:
2795 	hinic_comm_pf_to_mgmt_free(nic_dev->hwdev);
2796 
2797 init_pf_to_mgmt_fail:
2798 	hinic_comm_aeqs_free(nic_dev->hwdev);
2799 
2800 init_aeqs_fail:
2801 	free_cfg_mgmt(nic_dev->hwdev);
2802 
2803 init_cfgmgnt_fail:
2804 	hinic_hwif_res_free(nic_dev->hwdev);
2805 
2806 init_hwif_fail:
2807 	hinic_osdep_deinit(nic_dev->hwdev);
2808 
2809 init_osdep_fail:
2810 	rte_free(nic_dev->hwdev);
2811 	nic_dev->hwdev = NULL;
2812 
2813 	return rc;
2814 }
2815 
2816 static void hinic_nic_dev_destroy(struct rte_eth_dev *eth_dev)
2817 {
2818 	struct hinic_nic_dev *nic_dev =
2819 			HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
2820 
2821 	(void)hinic_set_link_status_follow(nic_dev->hwdev,
2822 					   HINIC_LINK_FOLLOW_DEFAULT);
2823 	hinic_copy_mempool_uninit(nic_dev);
2824 	hinic_deinit_sw_rxtxqs(nic_dev);
2825 	hinic_deinit_nicio(nic_dev->hwdev);
2826 	hinic_deactivate_hwdev_state(nic_dev->hwdev);
2827 	hinic_comm_cmdqs_free(nic_dev->hwdev);
2828 	hinic_comm_func_to_func_free(nic_dev->hwdev);
2829 	hinic_comm_pf_to_mgmt_free(nic_dev->hwdev);
2830 	hinic_comm_aeqs_free(nic_dev->hwdev);
2831 	free_cfg_mgmt(nic_dev->hwdev);
2832 	hinic_hwif_res_free(nic_dev->hwdev);
2833 	hinic_osdep_deinit(nic_dev->hwdev);
2834 	rte_free(nic_dev->hwdev);
2835 	nic_dev->hwdev = NULL;
2836 }
2837 
2838 /**
2839  * DPDK callback to close the device.
2840  *
2841  * @param dev
2842  *   Pointer to Ethernet device structure.
2843  */
2844 static void hinic_dev_close(struct rte_eth_dev *dev)
2845 {
2846 	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2847 
2848 	if (hinic_test_and_set_bit(HINIC_DEV_CLOSE, &nic_dev->dev_status)) {
2849 		PMD_DRV_LOG(WARNING, "Device %s already closed",
2850 			    dev->data->name);
2851 		return;
2852 	}
2853 
2854 	/* stop device first */
2855 	hinic_dev_stop(dev);
2856 
2857 	/* rx_cqe, rx_info */
2858 	hinic_free_all_rx_resources(dev);
2859 
2860 	/* tx_info */
2861 	hinic_free_all_tx_resources(dev);
2862 
2863 	/* free wq, pi_dma_addr */
2864 	hinic_free_all_rq(nic_dev);
2865 
2866 	/* free wq, db_addr */
2867 	hinic_free_all_sq(nic_dev);
2868 
2869 	/* deinit mac vlan tbl */
2870 	hinic_deinit_mac_addr(dev);
2871 	hinic_remove_all_vlanid(dev);
2872 
2873 	/* disable hardware and uio interrupt */
2874 	hinic_disable_interrupt(dev);
2875 
2876 	/* deinit nic hardware device */
2877 	hinic_nic_dev_destroy(dev);
2878 }
2879 
2880 static const struct eth_dev_ops hinic_pmd_ops = {
2881 	.dev_configure                 = hinic_dev_configure,
2882 	.dev_infos_get                 = hinic_dev_infos_get,
2883 	.fw_version_get                = hinic_fw_version_get,
2884 	.rx_queue_setup                = hinic_rx_queue_setup,
2885 	.tx_queue_setup                = hinic_tx_queue_setup,
2886 	.dev_start                     = hinic_dev_start,
2887 	.dev_set_link_up               = hinic_dev_set_link_up,
2888 	.dev_set_link_down             = hinic_dev_set_link_down,
2889 	.link_update                   = hinic_link_update,
2890 	.rx_queue_release              = hinic_rx_queue_release,
2891 	.tx_queue_release              = hinic_tx_queue_release,
2892 	.dev_stop                      = hinic_dev_stop,
2893 	.dev_close                     = hinic_dev_close,
2894 	.mtu_set                       = hinic_dev_set_mtu,
2895 	.vlan_filter_set               = hinic_vlan_filter_set,
2896 	.vlan_offload_set              = hinic_vlan_offload_set,
2897 	.allmulticast_enable           = hinic_dev_allmulticast_enable,
2898 	.allmulticast_disable          = hinic_dev_allmulticast_disable,
2899 	.promiscuous_enable            = hinic_dev_promiscuous_enable,
2900 	.promiscuous_disable           = hinic_dev_promiscuous_disable,
2901 	.rss_hash_update               = hinic_rss_hash_update,
2902 	.rss_hash_conf_get             = hinic_rss_conf_get,
2903 	.reta_update                   = hinic_rss_indirtbl_update,
2904 	.reta_query                    = hinic_rss_indirtbl_query,
2905 	.stats_get                     = hinic_dev_stats_get,
2906 	.stats_reset                   = hinic_dev_stats_reset,
2907 	.xstats_get                    = hinic_dev_xstats_get,
2908 	.xstats_reset                  = hinic_dev_xstats_reset,
2909 	.xstats_get_names              = hinic_dev_xstats_get_names,
2910 	.rxq_info_get                  = hinic_rxq_info_get,
2911 	.txq_info_get                  = hinic_txq_info_get,
2912 	.mac_addr_set                  = hinic_set_mac_addr,
2913 	.mac_addr_remove               = hinic_mac_addr_remove,
2914 	.mac_addr_add                  = hinic_mac_addr_add,
2915 	.set_mc_addr_list              = hinic_set_mc_addr_list,
2916 	.filter_ctrl                   = hinic_dev_filter_ctrl,
2917 };
2918 
2919 static const struct eth_dev_ops hinic_pmd_vf_ops = {
2920 	.dev_configure                 = hinic_dev_configure,
2921 	.dev_infos_get                 = hinic_dev_infos_get,
2922 	.fw_version_get                = hinic_fw_version_get,
2923 	.rx_queue_setup                = hinic_rx_queue_setup,
2924 	.tx_queue_setup                = hinic_tx_queue_setup,
2925 	.dev_start                     = hinic_dev_start,
2926 	.link_update                   = hinic_link_update,
2927 	.rx_queue_release              = hinic_rx_queue_release,
2928 	.tx_queue_release              = hinic_tx_queue_release,
2929 	.dev_stop                      = hinic_dev_stop,
2930 	.dev_close                     = hinic_dev_close,
2931 	.mtu_set                       = hinic_dev_set_mtu,
2932 	.vlan_filter_set               = hinic_vlan_filter_set,
2933 	.vlan_offload_set              = hinic_vlan_offload_set,
2934 	.allmulticast_enable           = hinic_dev_allmulticast_enable,
2935 	.allmulticast_disable          = hinic_dev_allmulticast_disable,
2936 	.rss_hash_update               = hinic_rss_hash_update,
2937 	.rss_hash_conf_get             = hinic_rss_conf_get,
2938 	.reta_update                   = hinic_rss_indirtbl_update,
2939 	.reta_query                    = hinic_rss_indirtbl_query,
2940 	.stats_get                     = hinic_dev_stats_get,
2941 	.stats_reset                   = hinic_dev_stats_reset,
2942 	.xstats_get                    = hinic_dev_xstats_get,
2943 	.xstats_reset                  = hinic_dev_xstats_reset,
2944 	.xstats_get_names              = hinic_dev_xstats_get_names,
2945 	.rxq_info_get                  = hinic_rxq_info_get,
2946 	.txq_info_get                  = hinic_txq_info_get,
2947 	.mac_addr_set                  = hinic_set_mac_addr,
2948 	.mac_addr_remove               = hinic_mac_addr_remove,
2949 	.mac_addr_add                  = hinic_mac_addr_add,
2950 	.set_mc_addr_list              = hinic_set_mc_addr_list,
2951 	.filter_ctrl                   = hinic_dev_filter_ctrl,
2952 };
2953 
2954 static int hinic_func_init(struct rte_eth_dev *eth_dev)
2955 {
2956 	struct rte_pci_device *pci_dev;
2957 	struct rte_ether_addr *eth_addr;
2958 	struct hinic_nic_dev *nic_dev;
2959 	struct hinic_filter_info *filter_info;
2960 	u32 mac_size;
2961 	int rc;
2962 
2963 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2964 
2965 	/* EAL is SECONDARY and eth_dev is already created */
2966 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2967 		rc = rte_intr_callback_register(&pci_dev->intr_handle,
2968 						hinic_dev_interrupt_handler,
2969 						(void *)eth_dev);
2970 		if (rc)
2971 			PMD_DRV_LOG(ERR, "Initialize %s failed in secondary process",
2972 				    eth_dev->data->name);
2973 
2974 		return rc;
2975 	}
2976 
2977 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
2978 	memset(nic_dev, 0, sizeof(*nic_dev));
2979 
2980 	snprintf(nic_dev->proc_dev_name,
2981 		 sizeof(nic_dev->proc_dev_name),
2982 		 "hinic-%.4x:%.2x:%.2x.%x",
2983 		 pci_dev->addr.domain, pci_dev->addr.bus,
2984 		 pci_dev->addr.devid, pci_dev->addr.function);
2985 
2986 	/* alloc mac_addrs */
2987 	mac_size = HINIC_MAX_UC_MAC_ADDRS * sizeof(struct rte_ether_addr);
2988 	eth_addr = rte_zmalloc("hinic_mac", mac_size, 0);
2989 	if (!eth_addr) {
2990 		PMD_DRV_LOG(ERR, "Allocate MAC address memory failed, dev_name: %s",
2991 			    eth_dev->data->name);
2992 		rc = -ENOMEM;
2993 		goto eth_addr_fail;
2994 	}
2995 	eth_dev->data->mac_addrs = eth_addr;
2996 
2997 	mac_size = HINIC_MAX_MC_MAC_ADDRS * sizeof(struct rte_ether_addr);
2998 	nic_dev->mc_list = rte_zmalloc("hinic_mc", mac_size, 0);
2999 	if (!nic_dev->mc_list) {
3000 		PMD_DRV_LOG(ERR, "Allocate multicast address memory failed, dev_name: %s",
3001 			    eth_dev->data->name);
3002 		rc = -ENOMEM;
3003 		goto mc_addr_fail;
3004 	}
3005 
3006 	/*
3007 	 * Pass the information to rte_eth_dev_close() that it should also
3008 	 * release the private port resources.
3009 	 */
3010 	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
3011 
3012 	/* create hardware nic_device */
3013 	rc = hinic_nic_dev_create(eth_dev);
3014 	if (rc) {
3015 		PMD_DRV_LOG(ERR, "Create nic device failed, dev_name: %s",
3016 			    eth_dev->data->name);
3017 		goto create_nic_dev_fail;
3018 	}
3019 
3020 	if (HINIC_IS_VF(nic_dev->hwdev))
3021 		eth_dev->dev_ops = &hinic_pmd_vf_ops;
3022 	else
3023 		eth_dev->dev_ops = &hinic_pmd_ops;
3024 
3025 	rc = hinic_init_mac_addr(eth_dev);
3026 	if (rc) {
3027 		PMD_DRV_LOG(ERR, "Initialize mac table failed, dev_name: %s",
3028 			    eth_dev->data->name);
3029 		goto init_mac_fail;
3030 	}
3031 
3032 	/* register callback func to eal lib */
3033 	rc = rte_intr_callback_register(&pci_dev->intr_handle,
3034 					hinic_dev_interrupt_handler,
3035 					(void *)eth_dev);
3036 	if (rc) {
3037 		PMD_DRV_LOG(ERR, "Register rte interrupt callback failed, dev_name: %s",
3038 			    eth_dev->data->name);
3039 		goto reg_intr_cb_fail;
3040 	}
3041 
3042 	/* enable uio/vfio intr/eventfd mapping */
3043 	rc = rte_intr_enable(&pci_dev->intr_handle);
3044 	if (rc) {
3045 		PMD_DRV_LOG(ERR, "Enable rte interrupt failed, dev_name: %s",
3046 			    eth_dev->data->name);
3047 		goto enable_intr_fail;
3048 	}
3049 	hinic_set_bit(HINIC_DEV_INTR_EN, &nic_dev->dev_status);
3050 
3051 	/* initialize filter info */
3052 	filter_info = &nic_dev->filter;
3053 	memset(filter_info, 0, sizeof(struct hinic_filter_info));
3054 	/* initialize 5tuple filter list */
3055 	TAILQ_INIT(&filter_info->fivetuple_list);
3056 	TAILQ_INIT(&nic_dev->filter_ntuple_list);
3057 	TAILQ_INIT(&nic_dev->filter_ethertype_list);
3058 	TAILQ_INIT(&nic_dev->filter_fdir_rule_list);
3059 	TAILQ_INIT(&nic_dev->hinic_flow_list);
3060 
3061 	hinic_set_bit(HINIC_DEV_INIT, &nic_dev->dev_status);
3062 	PMD_DRV_LOG(INFO, "Initialized %s in primary process successfully",
3063 		    eth_dev->data->name);
3064 
3065 	return 0;
3066 
3067 enable_intr_fail:
3068 	(void)rte_intr_callback_unregister(&pci_dev->intr_handle,
3069 					   hinic_dev_interrupt_handler,
3070 					   (void *)eth_dev);
3071 
3072 reg_intr_cb_fail:
3073 	hinic_deinit_mac_addr(eth_dev);
3074 
3075 init_mac_fail:
3076 	eth_dev->dev_ops = NULL;
3077 	hinic_nic_dev_destroy(eth_dev);
3078 
3079 create_nic_dev_fail:
3080 	rte_free(nic_dev->mc_list);
3081 	nic_dev->mc_list = NULL;
3082 
3083 mc_addr_fail:
3084 	rte_free(eth_addr);
3085 	eth_dev->data->mac_addrs = NULL;
3086 
3087 eth_addr_fail:
3088 	PMD_DRV_LOG(ERR, "Initialization of %s in primary process failed",
3089 		    eth_dev->data->name);
3090 	return rc;
3091 }
3092 
3093 static int hinic_dev_init(struct rte_eth_dev *eth_dev)
3094 {
3095 	struct rte_pci_device *pci_dev;
3096 
3097 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
3098 
3099 	PMD_DRV_LOG(INFO, "Initializing pf hinic-%.4x:%.2x:%.2x.%x in %s process",
3100 		    pci_dev->addr.domain, pci_dev->addr.bus,
3101 		    pci_dev->addr.devid, pci_dev->addr.function,
3102 		    (rte_eal_process_type() == RTE_PROC_PRIMARY) ?
3103 		    "primary" : "secondary");
3104 
3105 	/* rte_eth_dev rx_burst and tx_burst */
3106 	eth_dev->rx_pkt_burst = hinic_recv_pkts;
3107 	eth_dev->tx_pkt_burst = hinic_xmit_pkts;
3108 
3109 	return hinic_func_init(eth_dev);
3110 }
3111 
3112 static int hinic_dev_uninit(struct rte_eth_dev *dev)
3113 {
3114 	struct hinic_nic_dev *nic_dev;
3115 
3116 	nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3117 	hinic_clear_bit(HINIC_DEV_INIT, &nic_dev->dev_status);
3118 
3119 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
3120 		return 0;
3121 
3122 	hinic_dev_close(dev);
3123 
3124 	dev->dev_ops = NULL;
3125 	dev->rx_pkt_burst = NULL;
3126 	dev->tx_pkt_burst = NULL;
3127 
3128 	rte_free(nic_dev->mc_list);
3129 
3130 	rte_free(dev->data->mac_addrs);
3131 	dev->data->mac_addrs = NULL;
3132 
3133 	return HINIC_OK;
3134 }
3135 
3136 static struct rte_pci_id pci_id_hinic_map[] = {
3137 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_PRD) },
3138 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_25GE) },
3139 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_40GE) },
3140 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_100GE) },
3141 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF) },
3142 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF_HV) },
3143 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_DUAL_25GE) },
3144 	{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_1822_100GE) },
3145 	{.vendor_id = 0},
3146 };
3147 
3148 static int hinic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3149 			   struct rte_pci_device *pci_dev)
3150 {
3151 	return rte_eth_dev_pci_generic_probe(pci_dev,
3152 		sizeof(struct hinic_nic_dev), hinic_dev_init);
3153 }
3154 
3155 static int hinic_pci_remove(struct rte_pci_device *pci_dev)
3156 {
3157 	return rte_eth_dev_pci_generic_remove(pci_dev, hinic_dev_uninit);
3158 }
3159 
3160 static struct rte_pci_driver rte_hinic_pmd = {
3161 	.id_table = pci_id_hinic_map,
3162 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
3163 	.probe = hinic_pci_probe,
3164 	.remove = hinic_pci_remove,
3165 };
3166 
3167 RTE_PMD_REGISTER_PCI(net_hinic, rte_hinic_pmd);
3168 RTE_PMD_REGISTER_PCI_TABLE(net_hinic, pci_id_hinic_map);
3169 
3170 RTE_INIT(hinic_init_log)
3171 {
3172 	hinic_logtype = rte_log_register("pmd.net.hinic");
3173 	if (hinic_logtype >= 0)
3174 		rte_log_set_level(hinic_logtype, RTE_LOG_INFO);
3175 }
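/*
 * The default level registered above is INFO; more verbose driver output can
 * be enabled at run time with rte_log_set_level() or via the EAL --log-level
 * option.
 */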
3176