xref: /dpdk/drivers/net/thunderx/nicvf_ethdev.c (revision 358309f36776ba397601ba25710e7d23ee8f55ce)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc
3  */
4 
5 #include <assert.h>
6 #include <stdio.h>
7 #include <stdbool.h>
8 #include <errno.h>
9 #include <stdint.h>
10 #include <string.h>
11 #include <unistd.h>
12 #include <stdarg.h>
13 #include <inttypes.h>
14 #include <netinet/in.h>
15 #include <sys/queue.h>
16 
17 #include <rte_alarm.h>
18 #include <rte_branch_prediction.h>
19 #include <rte_byteorder.h>
20 #include <rte_common.h>
21 #include <rte_cycles.h>
22 #include <rte_debug.h>
23 #include <rte_dev.h>
24 #include <rte_eal.h>
25 #include <rte_ether.h>
26 #include <rte_ethdev_driver.h>
27 #include <rte_ethdev_pci.h>
28 #include <rte_interrupts.h>
29 #include <rte_log.h>
30 #include <rte_memory.h>
31 #include <rte_memzone.h>
32 #include <rte_malloc.h>
33 #include <rte_random.h>
34 #include <rte_pci.h>
35 #include <rte_bus_pci.h>
36 #include <rte_tailq.h>
37 
38 #include "base/nicvf_plat.h"
39 
40 #include "nicvf_ethdev.h"
41 #include "nicvf_rxtx.h"
42 #include "nicvf_svf.h"
43 #include "nicvf_logs.h"
44 
45 int nicvf_logtype_mbox;
46 int nicvf_logtype_init;
47 int nicvf_logtype_driver;
48 
49 static void nicvf_dev_stop(struct rte_eth_dev *dev);
50 static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
51 static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
52 			  bool cleanup);
53 
54 RTE_INIT(nicvf_init_log);
55 static void
56 nicvf_init_log(void)
57 {
58 	nicvf_logtype_mbox = rte_log_register("pmd.net.thunderx.mbox");
59 	if (nicvf_logtype_mbox >= 0)
60 		rte_log_set_level(nicvf_logtype_mbox, RTE_LOG_NOTICE);
61 
62 	nicvf_logtype_init = rte_log_register("pmd.net.thunderx.init");
63 	if (nicvf_logtype_init >= 0)
64 		rte_log_set_level(nicvf_logtype_init, RTE_LOG_NOTICE);
65 
66 	nicvf_logtype_driver = rte_log_register("pmd.net.thunderx.driver");
67 	if (nicvf_logtype_driver >= 0)
68 		rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE);
69 }
70 
71 static void
72 nicvf_link_status_update(struct nicvf *nic,
73 			 struct rte_eth_link *link)
74 {
75 	memset(link, 0, sizeof(*link));
76 
77 	link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
78 
79 	if (nic->duplex == NICVF_HALF_DUPLEX)
80 		link->link_duplex = ETH_LINK_HALF_DUPLEX;
81 	else if (nic->duplex == NICVF_FULL_DUPLEX)
82 		link->link_duplex = ETH_LINK_FULL_DUPLEX;
83 	link->link_speed = nic->speed;
84 	link->link_autoneg = ETH_LINK_AUTONEG;
85 }
86 
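/*
 * Periodic alarm callback for the primary VF: poll the mailbox/interrupt
 * registers and, when a BGX link change is reported, refresh the cached
 * link state and fire the LSC callback if the application enabled it.
 * The alarm re-arms itself every NICVF_INTR_POLL_INTERVAL_MS.
 */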
87 static void
88 nicvf_interrupt(void *arg)
89 {
90 	struct rte_eth_dev *dev = arg;
91 	struct nicvf *nic = nicvf_pmd_priv(dev);
92 	struct rte_eth_link link;
93 
94 	if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
95 		if (dev->data->dev_conf.intr_conf.lsc) {
96 			nicvf_link_status_update(nic, &link);
97 			rte_eth_linkstatus_set(dev, &link);
98 
99 			_rte_eth_dev_callback_process(dev,
100 						      RTE_ETH_EVENT_INTR_LSC,
101 						      NULL);
102 		}
103 	}
104 
105 	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
106 				nicvf_interrupt, dev);
107 }
108 
109 static void
110 nicvf_vf_interrupt(void *arg)
111 {
112 	struct nicvf *nic = arg;
113 
114 	nicvf_reg_poll_interrupts(nic);
115 
116 	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
117 				nicvf_vf_interrupt, nic);
118 }
119 
120 static int
121 nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
122 {
123 	return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
124 }
125 
126 static int
127 nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
128 {
129 	return rte_eal_alarm_cancel(fn, arg);
130 }
131 
132 /*
133  * Return 0 if the link status changed, -1 if it has not changed
134  */
135 static int
136 nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
137 {
138 #define CHECK_INTERVAL 100  /* 100ms */
139 #define MAX_CHECK_TIME 90   /* 9s (90 * 100ms) in total */
140 	struct rte_eth_link link;
141 	struct nicvf *nic = nicvf_pmd_priv(dev);
142 	int i;
143 
144 	PMD_INIT_FUNC_TRACE();
145 
146 	if (wait_to_complete) {
147 		/* rte_eth_link_get() might need to wait up to 9 seconds */
148 		for (i = 0; i < MAX_CHECK_TIME; i++) {
149 			nicvf_link_status_update(nic, &link);
150 			if (link.link_status == ETH_LINK_UP)
151 				break;
152 			rte_delay_ms(CHECK_INTERVAL);
153 		}
154 	} else {
155 		nicvf_link_status_update(nic, &link);
156 	}
157 
158 	return rte_eth_linkstatus_set(dev, &link);
159 }
160 
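/*
 * MTU update: the resulting frame size (MTU + L2 header + CRC) must stay
 * within the HW limits and, unless scattered Rx is enabled, fit into a
 * single Rx buffer. The new MTU is propagated to all secondary VFs.
 */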
161 static int
162 nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
163 {
164 	struct nicvf *nic = nicvf_pmd_priv(dev);
165 	uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
166 	size_t i;
167 	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
168 
169 	PMD_INIT_FUNC_TRACE();
170 
171 	if (frame_size > NIC_HW_MAX_FRS)
172 		return -EINVAL;
173 
174 	if (frame_size < NIC_HW_MIN_FRS)
175 		return -EINVAL;
176 
177 	buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
178 
179 	/*
180 	 * Refuse an MTU that would require scattered packet support
181 	 * when that feature has not already been enabled.
182 	 */
183 	if (!dev->data->scattered_rx &&
184 		(frame_size + 2 * VLAN_TAG_SIZE > buffsz))
185 		return -EINVAL;
186 
187 	/* check <seg size> * <max_seg>  >= max_frame */
188 	if (dev->data->scattered_rx &&
189 		(frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
190 		return -EINVAL;
191 
192 	if (frame_size > ETHER_MAX_LEN)
193 		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
194 	else
195 		rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
196 
197 	if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
198 		return -EINVAL;
199 
200 	/* Update max frame size */
201 	rxmode->max_rx_pkt_len = (uint32_t)frame_size;
202 	nic->mtu = mtu;
203 
204 	for (i = 0; i < nic->sqs_count; i++)
205 		nic->snicvf[i]->mtu = mtu;
206 
207 	return 0;
208 }
209 
210 static int
211 nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
212 {
213 	uint64_t *data = regs->data;
214 	struct nicvf *nic = nicvf_pmd_priv(dev);
215 
216 	if (data == NULL) {
217 		regs->length = nicvf_reg_get_count();
218 		regs->width = THUNDERX_REG_BYTES;
219 		return 0;
220 	}
221 
222 	/* Support only full register dump */
223 	if ((regs->length == 0) ||
224 		(regs->length == (uint32_t)nicvf_reg_get_count())) {
225 		regs->version = nic->vendor_id << 16 | nic->device_id;
226 		nicvf_reg_dump(nic, data);
227 		return 0;
228 	}
229 	return -ENOTSUP;
230 }
231 
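/*
 * Collect basic stats: per-queue counters are read from the primary VF and
 * from every attached secondary VF (limited to RTE_ETHDEV_QUEUE_STAT_CNTRS
 * queues), then port-level counters are read from the primary VF.
 */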
232 static int
233 nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
234 {
235 	uint16_t qidx;
236 	struct nicvf_hw_rx_qstats rx_qstats;
237 	struct nicvf_hw_tx_qstats tx_qstats;
238 	struct nicvf_hw_stats port_stats;
239 	struct nicvf *nic = nicvf_pmd_priv(dev);
240 	uint16_t rx_start, rx_end;
241 	uint16_t tx_start, tx_end;
242 	size_t i;
243 
244 	/* RX queue indices for the first VF */
245 	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
246 
247 	/* Reading per RX ring stats */
248 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
249 		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
250 			break;
251 
252 		nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
253 		stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
254 		stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
255 	}
256 
257 	/* TX queue indices for the first VF */
258 	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
259 
260 	/* Reading per TX ring stats */
261 	for (qidx = tx_start; qidx <= tx_end; qidx++) {
262 		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
263 			break;
264 
265 		nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
266 		stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
267 		stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
268 	}
269 
270 	for (i = 0; i < nic->sqs_count; i++) {
271 		struct nicvf *snic = nic->snicvf[i];
272 
273 		if (snic == NULL)
274 			break;
275 
276 		/* RX queue indices for a secondary VF */
277 		nicvf_rx_range(dev, snic, &rx_start, &rx_end);
278 
279 		/* Reading per RX ring stats */
280 		for (qidx = rx_start; qidx <= rx_end; qidx++) {
281 			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
282 				break;
283 
284 			nicvf_hw_get_rx_qstats(snic, &rx_qstats,
285 					       qidx % MAX_RCV_QUEUES_PER_QS);
286 			stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
287 			stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
288 		}
289 
290 		/* TX queue indices for a secondary VF */
291 		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
292 		/* Reading per TX ring stats */
293 		for (qidx = tx_start; qidx <= tx_end; qidx++) {
294 			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
295 				break;
296 
297 			nicvf_hw_get_tx_qstats(snic, &tx_qstats,
298 					       qidx % MAX_SND_QUEUES_PER_QS);
299 			stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
300 			stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
301 		}
302 	}
303 
304 	nicvf_hw_get_stats(nic, &port_stats);
305 	stats->ibytes = port_stats.rx_bytes;
306 	stats->ipackets = port_stats.rx_ucast_frames;
307 	stats->ipackets += port_stats.rx_bcast_frames;
308 	stats->ipackets += port_stats.rx_mcast_frames;
309 	stats->ierrors = port_stats.rx_l2_errors;
310 	stats->imissed = port_stats.rx_drop_red;
311 	stats->imissed += port_stats.rx_drop_overrun;
312 	stats->imissed += port_stats.rx_drop_bcast;
313 	stats->imissed += port_stats.rx_drop_mcast;
314 	stats->imissed += port_stats.rx_drop_l3_bcast;
315 	stats->imissed += port_stats.rx_drop_l3_mcast;
316 
317 	stats->obytes = port_stats.tx_bytes_ok;
318 	stats->opackets = port_stats.tx_ucast_frames_ok;
319 	stats->opackets += port_stats.tx_bcast_frames_ok;
320 	stats->opackets += port_stats.tx_mcast_frames_ok;
321 	stats->oerrors = port_stats.tx_drops;
322 
323 	return 0;
324 }
325 
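/*
 * Build the advertised packet-type list at run time: tunnel ptypes are
 * appended only when the HW reports tunnel parsing capability, and the
 * list is returned only while one of this PMD's Rx handlers is in use.
 */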
326 static const uint32_t *
327 nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
328 {
329 	size_t copied;
330 	static uint32_t ptypes[32];
331 	struct nicvf *nic = nicvf_pmd_priv(dev);
332 	static const uint32_t ptypes_common[] = {
333 		RTE_PTYPE_L3_IPV4,
334 		RTE_PTYPE_L3_IPV4_EXT,
335 		RTE_PTYPE_L3_IPV6,
336 		RTE_PTYPE_L3_IPV6_EXT,
337 		RTE_PTYPE_L4_TCP,
338 		RTE_PTYPE_L4_UDP,
339 		RTE_PTYPE_L4_FRAG,
340 	};
341 	static const uint32_t ptypes_tunnel[] = {
342 		RTE_PTYPE_TUNNEL_GRE,
343 		RTE_PTYPE_TUNNEL_GENEVE,
344 		RTE_PTYPE_TUNNEL_VXLAN,
345 		RTE_PTYPE_TUNNEL_NVGRE,
346 	};
347 	static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;
348 
349 	copied = sizeof(ptypes_common);
350 	memcpy(ptypes, ptypes_common, copied);
351 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
352 		memcpy((char *)ptypes + copied, ptypes_tunnel,
353 			sizeof(ptypes_tunnel));
354 		copied += sizeof(ptypes_tunnel);
355 	}
356 
357 	memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
358 	if (dev->rx_pkt_burst == nicvf_recv_pkts ||
359 		dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
360 		return ptypes;
361 
362 	return NULL;
363 }
364 
365 static void
366 nicvf_dev_stats_reset(struct rte_eth_dev *dev)
367 {
368 	int i, j;
369 	uint16_t rxqs = 0, txqs = 0;
370 	struct nicvf *nic = nicvf_pmd_priv(dev);
371 	uint16_t rx_start, rx_end;
372 	uint16_t tx_start, tx_end;
373 
374 	/* Reset all primary nic counters */
375 	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
376 	for (i = rx_start; i <= rx_end; i++)
377 		rxqs |= (0x3 << (i * 2));
378 
379 	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
380 	for (i = tx_start; i <= tx_end; i++)
381 		txqs |= (0x3 << (i * 2));
382 
383 	nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
384 
385 	/* Reset secondary nic queue counters */
386 	for (i = 0; i < nic->sqs_count; i++) {
387 		struct nicvf *snic = nic->snicvf[i];
388 		if (snic == NULL)
389 			break;
390 
391 		nicvf_rx_range(dev, snic, &rx_start, &rx_end);
392 		for (j = rx_start; j <= rx_end; j++)
393 			rxqs |= (0x3 << ((j % MAX_CMP_QUEUES_PER_QS) * 2));
394 
395 		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
396 		for (j = tx_start; j <= tx_end; j++)
397 			txqs |= (0x3 << ((j % MAX_SND_QUEUES_PER_QS) * 2));
398 
399 		nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
400 	}
401 }
402 
403 /* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
404 static void
405 nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
406 {
407 }
408 
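/* Translate ethdev ETH_RSS_* flags into the NIC's RSS_*_ENA config bits. */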
409 static inline uint64_t
410 nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
411 {
412 	uint64_t nic_rss = 0;
413 
414 	if (ethdev_rss & ETH_RSS_IPV4)
415 		nic_rss |= RSS_IP_ENA;
416 
417 	if (ethdev_rss & ETH_RSS_IPV6)
418 		nic_rss |= RSS_IP_ENA;
419 
420 	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
421 		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
422 
423 	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
424 		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
425 
426 	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
427 		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
428 
429 	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
430 		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
431 
432 	if (ethdev_rss & ETH_RSS_PORT)
433 		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
434 
435 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
436 		if (ethdev_rss & ETH_RSS_VXLAN)
437 			nic_rss |= RSS_TUN_VXLAN_ENA;
438 
439 		if (ethdev_rss & ETH_RSS_GENEVE)
440 			nic_rss |= RSS_TUN_GENEVE_ENA;
441 
442 		if (ethdev_rss & ETH_RSS_NVGRE)
443 			nic_rss |= RSS_TUN_NVGRE_ENA;
444 	}
445 
446 	return nic_rss;
447 }
448 
449 static inline uint64_t
450 nicvf_rss_nic_to_ethdev(struct nicvf *nic,  uint64_t nic_rss)
451 {
452 	uint64_t ethdev_rss = 0;
453 
454 	if (nic_rss & RSS_IP_ENA)
455 		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
456 
457 	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
458 		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
459 				ETH_RSS_NONFRAG_IPV6_TCP);
460 
461 	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
462 		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
463 				ETH_RSS_NONFRAG_IPV6_UDP);
464 
465 	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
466 		ethdev_rss |= ETH_RSS_PORT;
467 
468 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
469 		if (nic_rss & RSS_TUN_VXLAN_ENA)
470 			ethdev_rss |= ETH_RSS_VXLAN;
471 
472 		if (nic_rss & RSS_TUN_GENEVE_ENA)
473 			ethdev_rss |= ETH_RSS_GENEVE;
474 
475 		if (nic_rss & RSS_TUN_NVGRE_ENA)
476 			ethdev_rss |= ETH_RSS_NVGRE;
477 	}
478 	return ethdev_rss;
479 }
480 
481 static int
482 nicvf_dev_reta_query(struct rte_eth_dev *dev,
483 		     struct rte_eth_rss_reta_entry64 *reta_conf,
484 		     uint16_t reta_size)
485 {
486 	struct nicvf *nic = nicvf_pmd_priv(dev);
487 	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
488 	int ret, i, j;
489 
490 	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
491 		RTE_LOG(ERR, PMD, "The size of the configured hash lookup table "
492 			"(%d) doesn't match the size supported by hardware "
493 			"(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
494 		return -EINVAL;
495 	}
496 
497 	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
498 	if (ret)
499 		return ret;
500 
501 	/* Copy RETA table */
502 	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
503 		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
504 			if ((reta_conf[i].mask >> j) & 0x01)
505 				reta_conf[i].reta[j] = tbl[j];
506 	}
507 
508 	return 0;
509 }
510 
511 static int
512 nicvf_dev_reta_update(struct rte_eth_dev *dev,
513 		      struct rte_eth_rss_reta_entry64 *reta_conf,
514 		      uint16_t reta_size)
515 {
516 	struct nicvf *nic = nicvf_pmd_priv(dev);
517 	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
518 	int ret, i, j;
519 
520 	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
521 		RTE_LOG(ERR, PMD, "The size of the configured hash lookup table "
522 			"(%d) doesn't match the size supported by hardware "
523 			"(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
524 		return -EINVAL;
525 	}
526 
527 	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
528 	if (ret)
529 		return ret;
530 
531 	/* Copy RETA table */
532 	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
533 		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
534 			if ((reta_conf[i].mask >> j) & 0x01)
535 				tbl[j] = reta_conf[i].reta[j];
536 	}
537 
538 	return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
539 }
540 
541 static int
542 nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
543 			    struct rte_eth_rss_conf *rss_conf)
544 {
545 	struct nicvf *nic = nicvf_pmd_priv(dev);
546 
547 	if (rss_conf->rss_key)
548 		nicvf_rss_get_key(nic, rss_conf->rss_key);
549 
550 	rss_conf->rss_key_len =  RSS_HASH_KEY_BYTE_SIZE;
551 	rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
552 	return 0;
553 }
554 
555 static int
556 nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
557 			  struct rte_eth_rss_conf *rss_conf)
558 {
559 	struct nicvf *nic = nicvf_pmd_priv(dev);
560 	uint64_t nic_rss;
561 
562 	if (rss_conf->rss_key &&
563 		rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
564 		RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
565 				rss_conf->rss_key_len);
566 		return -EINVAL;
567 	}
568 
569 	if (rss_conf->rss_key)
570 		nicvf_rss_set_key(nic, rss_conf->rss_key);
571 
572 	nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
573 	nicvf_rss_set_cfg(nic, nic_rss);
574 	return 0;
575 }
576 
577 static int
578 nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
579 		    struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
580 {
581 	const struct rte_memzone *rz;
582 	uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);
583 
584 	rz = rte_eth_dma_zone_reserve(dev, "cq_ring",
585 				      nicvf_netdev_qidx(nic, qidx), ring_size,
586 				      NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
587 	if (rz == NULL) {
588 		PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
589 		return -ENOMEM;
590 	}
591 
592 	memset(rz->addr, 0, ring_size);
593 
594 	rxq->phys = rz->iova;
595 	rxq->desc = rz->addr;
596 	rxq->qlen_mask = desc_cnt - 1;
597 
598 	return 0;
599 }
600 
601 static int
602 nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
603 		    struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
604 {
605 	const struct rte_memzone *rz;
606 	uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);
607 
608 	rz = rte_eth_dma_zone_reserve(dev, "sq",
609 				      nicvf_netdev_qidx(nic, qidx), ring_size,
610 				      NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
611 	if (rz == NULL) {
612 		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
613 		return -ENOMEM;
614 	}
615 
616 	memset(rz->addr, 0, ring_size);
617 
618 	sq->phys = rz->iova;
619 	sq->desc = rz->addr;
620 	sq->qlen_mask = desc_cnt - 1;
621 
622 	return 0;
623 }
624 
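/*
 * Allocate the receive buffer descriptor ring (RBDR). A single RBDR is
 * created per VF and is later shared by all of that VF's Rx queues via
 * rxq->shared_rbdr.
 */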
625 static int
626 nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
627 		      uint32_t desc_cnt, uint32_t buffsz)
628 {
629 	struct nicvf_rbdr *rbdr;
630 	const struct rte_memzone *rz;
631 	uint32_t ring_size;
632 
633 	assert(nic->rbdr == NULL);
634 	rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
635 				  RTE_CACHE_LINE_SIZE, nic->node);
636 	if (rbdr == NULL) {
637 		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
638 		return -ENOMEM;
639 	}
640 
641 	ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
642 	rz = rte_eth_dma_zone_reserve(dev, "rbdr",
643 				      nicvf_netdev_qidx(nic, 0), ring_size,
644 				      NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
645 	if (rz == NULL) {
646 		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
647 		return -ENOMEM;
648 	}
649 
650 	memset(rz->addr, 0, ring_size);
651 
652 	rbdr->phys = rz->iova;
653 	rbdr->tail = 0;
654 	rbdr->next_tail = 0;
655 	rbdr->desc = rz->addr;
656 	rbdr->buffsz = buffsz;
657 	rbdr->qlen_mask = desc_cnt - 1;
658 	rbdr->rbdr_status =
659 		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
660 	rbdr->rbdr_door =
661 		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;
662 
663 	nic->rbdr = rbdr;
664 	return 0;
665 }
666 
667 static void
668 nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
669 			nicvf_iova_addr_t phy)
670 {
671 	uint16_t qidx;
672 	void *obj;
673 	struct nicvf_rxq *rxq;
674 	uint16_t rx_start, rx_end;
675 
676 	/* Get queue ranges for this VF */
677 	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
678 
679 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
680 		rxq = dev->data->rx_queues[qidx];
681 		if (rxq->precharge_cnt) {
682 			obj = (void *)nicvf_mbuff_phy2virt(phy,
683 							   rxq->mbuf_phys_off);
684 			rte_mempool_put(rxq->pool, obj);
685 			rxq->precharge_cnt--;
686 			break;
687 		}
688 	}
689 }
690 
691 static inline void
692 nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
693 {
694 	uint32_t qlen_mask, head;
695 	struct rbdr_entry_t *entry;
696 	struct nicvf_rbdr *rbdr = nic->rbdr;
697 
698 	qlen_mask = rbdr->qlen_mask;
699 	head = rbdr->head;
700 	while (head != rbdr->tail) {
701 		entry = rbdr->desc + head;
702 		nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
703 		head++;
704 		head = head & qlen_mask;
705 	}
706 }
707 
708 static inline void
709 nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
710 {
711 	uint32_t head;
712 
713 	head = txq->head;
714 	while (head != txq->tail) {
715 		if (txq->txbuffs[head]) {
716 			rte_pktmbuf_free_seg(txq->txbuffs[head]);
717 			txq->txbuffs[head] = NULL;
718 		}
719 		head++;
720 		head = head & txq->qlen_mask;
721 	}
722 }
723 
724 static void
725 nicvf_tx_queue_reset(struct nicvf_txq *txq)
726 {
727 	uint32_t txq_desc_cnt = txq->qlen_mask + 1;
728 
729 	memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
730 	memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
731 	txq->tail = 0;
732 	txq->head = 0;
733 	txq->xmit_bufs = 0;
734 }
735 
736 static inline int
737 nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
738 			uint16_t qidx)
739 {
740 	struct nicvf_txq *txq;
741 	int ret;
742 
743 	assert(qidx < MAX_SND_QUEUES_PER_QS);
744 
745 	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
746 		RTE_ETH_QUEUE_STATE_STARTED)
747 		return 0;
748 
749 	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
750 	txq->pool = NULL;
751 	ret = nicvf_qset_sq_config(nic, qidx, txq);
752 	if (ret) {
753 		PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d",
754 			     nic->vf_id, qidx, ret);
755 		goto config_sq_error;
756 	}
757 
758 	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
759 		RTE_ETH_QUEUE_STATE_STARTED;
760 	return ret;
761 
762 config_sq_error:
763 	nicvf_qset_sq_reclaim(nic, qidx);
764 	return ret;
765 }
766 
767 static inline int
768 nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
769 		       uint16_t qidx)
770 {
771 	struct nicvf_txq *txq;
772 	int ret;
773 
774 	assert(qidx < MAX_SND_QUEUES_PER_QS);
775 
776 	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
777 		RTE_ETH_QUEUE_STATE_STOPPED)
778 		return 0;
779 
780 	ret = nicvf_qset_sq_reclaim(nic, qidx);
781 	if (ret)
782 		PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
783 			     nic->vf_id, qidx, ret);
784 
785 	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
786 	nicvf_tx_queue_release_mbufs(txq);
787 	nicvf_tx_queue_reset(txq);
788 
789 	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
790 		RTE_ETH_QUEUE_STATE_STOPPED;
791 	return ret;
792 }
793 
794 static inline int
795 nicvf_configure_cpi(struct rte_eth_dev *dev)
796 {
797 	struct nicvf *nic = nicvf_pmd_priv(dev);
798 	uint16_t qidx, qcnt;
799 	int ret;
800 
801 	/* Count started rx queues */
802 	for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
803 		if (dev->data->rx_queue_state[qidx] ==
804 		    RTE_ETH_QUEUE_STATE_STARTED)
805 			qcnt++;
806 
807 	nic->cpi_alg = CPI_ALG_NONE;
808 	ret = nicvf_mbox_config_cpi(nic, qcnt);
809 	if (ret)
810 		PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);
811 
812 	return ret;
813 }
814 
815 static inline int
816 nicvf_configure_rss(struct rte_eth_dev *dev)
817 {
818 	struct nicvf *nic = nicvf_pmd_priv(dev);
819 	uint64_t rsshf;
820 	int ret = -EINVAL;
821 
822 	rsshf = nicvf_rss_ethdev_to_nic(nic,
823 			dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
824 	PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
825 		    dev->data->dev_conf.rxmode.mq_mode,
826 		    dev->data->nb_rx_queues,
827 		    dev->data->dev_conf.lpbk_mode, rsshf);
828 
829 	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
830 		ret = nicvf_rss_term(nic);
831 	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
832 		ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
833 	if (ret)
834 		PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
835 
836 	return ret;
837 }
838 
839 static int
840 nicvf_configure_rss_reta(struct rte_eth_dev *dev)
841 {
842 	struct nicvf *nic = nicvf_pmd_priv(dev);
843 	unsigned int idx, qmap_size;
844 	uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
845 	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
846 
847 	if (nic->cpi_alg != CPI_ALG_NONE)
848 		return -EINVAL;
849 
850 	/* Prepare queue map */
851 	for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
852 		if (dev->data->rx_queue_state[idx] ==
853 				RTE_ETH_QUEUE_STATE_STARTED)
854 			qmap[qmap_size++] = idx;
855 	}
856 
857 	/* Update default RSS RETA */
858 	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
859 		default_reta[idx] = qmap[idx % qmap_size];
860 
861 	return nicvf_rss_reta_update(nic, default_reta,
862 				     NIC_MAX_RSS_IDR_TBL_SIZE);
863 }
864 
865 static void
866 nicvf_dev_tx_queue_release(void *sq)
867 {
868 	struct nicvf_txq *txq;
869 
870 	PMD_INIT_FUNC_TRACE();
871 
872 	txq = (struct nicvf_txq *)sq;
873 	if (txq) {
874 		if (txq->txbuffs != NULL) {
875 			nicvf_tx_queue_release_mbufs(txq);
876 			rte_free(txq->txbuffs);
877 			txq->txbuffs = NULL;
878 		}
879 		rte_free(txq);
880 	}
881 }
882 
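/*
 * Select the Tx burst handler: the multi-segment path is used when any
 * queue enables DEV_TX_OFFLOAD_MULTI_SEGS, otherwise the single-segment
 * path is used. The per-queue mbuf free method was chosen at setup time.
 */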
883 static void
884 nicvf_set_tx_function(struct rte_eth_dev *dev)
885 {
886 	struct nicvf_txq *txq = NULL;
887 	size_t i;
888 	bool multiseg = false;
889 
890 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
891 		txq = dev->data->tx_queues[i];
892 		if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
893 			multiseg = true;
894 			break;
895 		}
896 	}
897 
898 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
899 	if (multiseg) {
900 		PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
901 		dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
902 	} else {
903 		PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
904 		dev->tx_pkt_burst = nicvf_xmit_pkts;
905 	}
906 
907 	if (txq && txq->pool_free == nicvf_single_pool_free_xmited_buffers)
908 		PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
909 	else
910 		PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
911 }
912 
913 static void
914 nicvf_set_rx_function(struct rte_eth_dev *dev)
915 {
916 	if (dev->data->scattered_rx) {
917 		PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
918 		dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
919 	} else {
920 		PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
921 		dev->rx_pkt_burst = nicvf_recv_pkts;
922 	}
923 }
924 
925 static int
926 nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
927 			 uint16_t nb_desc, unsigned int socket_id,
928 			 const struct rte_eth_txconf *tx_conf)
929 {
930 	uint16_t tx_free_thresh;
931 	bool is_single_pool;
932 	struct nicvf_txq *txq;
933 	struct nicvf *nic = nicvf_pmd_priv(dev);
934 	uint64_t conf_offloads, offload_capa, unsupported_offloads;
935 
936 	PMD_INIT_FUNC_TRACE();
937 
938 	if (qidx >= MAX_SND_QUEUES_PER_QS)
939 		nic = nic->snicvf[qidx / MAX_SND_QUEUES_PER_QS - 1];
940 
941 	qidx = qidx % MAX_SND_QUEUES_PER_QS;
942 
943 	/* Socket id check */
944 	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
945 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
946 		socket_id, nic->node);
947 
948 	conf_offloads = tx_conf->offloads;
949 	offload_capa = NICVF_TX_OFFLOAD_CAPA;
950 
951 	unsupported_offloads = conf_offloads & ~offload_capa;
952 	if (unsupported_offloads) {
953 		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported. "
954 		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
955 		      unsupported_offloads, conf_offloads, offload_capa);
956 		return -ENOTSUP;
957 	}
958 
959 	/* Tx deferred start is not supported */
960 	if (tx_conf->tx_deferred_start) {
961 		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
962 		return -EINVAL;
963 	}
964 
965 	/* Roundup nb_desc to available qsize and validate max number of desc */
966 	nb_desc = nicvf_qsize_sq_roundup(nb_desc);
967 	if (nb_desc == 0) {
968 		PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
969 		return -EINVAL;
970 	}
971 
972 	/* Validate tx_free_thresh */
973 	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
974 				tx_conf->tx_free_thresh :
975 				NICVF_DEFAULT_TX_FREE_THRESH);
976 
977 	if (tx_free_thresh > (nb_desc) ||
978 		tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
979 		PMD_INIT_LOG(ERR,
980 			"tx_free_thresh must be less than the number of TX "
981 			"descriptors. (tx_free_thresh=%u port=%d "
982 			"queue=%d)", (unsigned int)tx_free_thresh,
983 			(int)dev->data->port_id, (int)qidx);
984 		return -EINVAL;
985 	}
986 
987 	/* Free memory prior to re-allocation if needed. */
988 	if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
989 		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
990 				nicvf_netdev_qidx(nic, qidx));
991 		nicvf_dev_tx_queue_release(
992 			dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
993 		dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
994 	}
995 
996 	/* Allocating tx queue data structure */
997 	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
998 					RTE_CACHE_LINE_SIZE, nic->node);
999 	if (txq == NULL) {
1000 		PMD_INIT_LOG(ERR, "Failed to allocate txq=%d",
1001 			     nicvf_netdev_qidx(nic, qidx));
1002 		return -ENOMEM;
1003 	}
1004 
1005 	txq->nic = nic;
1006 	txq->queue_id = qidx;
1007 	txq->tx_free_thresh = tx_free_thresh;
1008 	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
1009 	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
1010 	txq->offloads = conf_offloads;
1011 
1012 	is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
1013 
1014 	/* Choose optimum free threshold value for multipool case */
1015 	if (!is_single_pool) {
1016 		txq->tx_free_thresh = (uint16_t)
1017 		(tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
1018 				NICVF_TX_FREE_MPOOL_THRESH :
1019 				tx_conf->tx_free_thresh);
1020 		txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
1021 	} else {
1022 		txq->pool_free = nicvf_single_pool_free_xmited_buffers;
1023 	}
1024 
1025 	/* Allocate software ring */
1026 	txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
1027 				nb_desc * sizeof(struct rte_mbuf *),
1028 				RTE_CACHE_LINE_SIZE, nic->node);
1029 
1030 	if (txq->txbuffs == NULL) {
1031 		nicvf_dev_tx_queue_release(txq);
1032 		return -ENOMEM;
1033 	}
1034 
1035 	if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
1036 		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
1037 		nicvf_dev_tx_queue_release(txq);
1038 		return -ENOMEM;
1039 	}
1040 
1041 	nicvf_tx_queue_reset(txq);
1042 
1043 	PMD_INIT_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p"
1044 			" phys=0x%" PRIx64 " offloads=0x%" PRIx64,
1045 			nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
1046 			txq->phys, txq->offloads);
1047 
1048 	dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
1049 	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1050 		RTE_ETH_QUEUE_STATE_STOPPED;
1051 	return 0;
1052 }
1053 
1054 static inline void
1055 nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
1056 {
1057 	uint32_t rxq_cnt;
1058 	uint32_t nb_pkts, released_pkts = 0;
1059 	uint32_t refill_cnt = 0;
1060 	struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];
1061 
1062 	if (dev->rx_pkt_burst == NULL)
1063 		return;
1064 
1065 	while ((rxq_cnt = nicvf_dev_rx_queue_count(dev,
1066 				nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) {
1067 		nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
1068 					NICVF_MAX_RX_FREE_THRESH);
1069 		PMD_DRV_LOG(INFO, "nb_pkts=%d  rxq_cnt=%d", nb_pkts, rxq_cnt);
1070 		while (nb_pkts) {
1071 			rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
1072 			released_pkts++;
1073 		}
1074 	}
1075 
1076 
1077 	refill_cnt += nicvf_dev_rbdr_refill(dev,
1078 			nicvf_netdev_qidx(rxq->nic, rxq->queue_id));
1079 
1080 	PMD_DRV_LOG(INFO, "free_cnt=%d  refill_cnt=%d",
1081 		    released_pkts, refill_cnt);
1082 }
1083 
1084 static void
1085 nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
1086 {
1087 	rxq->head = 0;
1088 	rxq->available_space = 0;
1089 	rxq->recv_buffers = 0;
1090 }
1091 
1092 static inline int
1093 nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
1094 			uint16_t qidx)
1095 {
1096 	struct nicvf_rxq *rxq;
1097 	int ret;
1098 
1099 	assert(qidx < MAX_RCV_QUEUES_PER_QS);
1100 
1101 	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
1102 		RTE_ETH_QUEUE_STATE_STARTED)
1103 		return 0;
1104 
1105 	/* Update rbdr pointer to all rxq */
1106 	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
1107 	rxq->shared_rbdr = nic->rbdr;
1108 
1109 	ret = nicvf_qset_rq_config(nic, qidx, rxq);
1110 	if (ret) {
1111 		PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d",
1112 			     nic->vf_id, qidx, ret);
1113 		goto config_rq_error;
1114 	}
1115 	ret = nicvf_qset_cq_config(nic, qidx, rxq);
1116 	if (ret) {
1117 		PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d",
1118 			     nic->vf_id, qidx, ret);
1119 		goto config_cq_error;
1120 	}
1121 
1122 	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1123 		RTE_ETH_QUEUE_STATE_STARTED;
1124 	return 0;
1125 
1126 config_cq_error:
1127 	nicvf_qset_cq_reclaim(nic, qidx);
1128 config_rq_error:
1129 	nicvf_qset_rq_reclaim(nic, qidx);
1130 	return ret;
1131 }
1132 
1133 static inline int
1134 nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
1135 		       uint16_t qidx)
1136 {
1137 	struct nicvf_rxq *rxq;
1138 	int ret, other_error;
1139 
1140 	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
1141 		RTE_ETH_QUEUE_STATE_STOPPED)
1142 		return 0;
1143 
1144 	ret = nicvf_qset_rq_reclaim(nic, qidx);
1145 	if (ret)
1146 		PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d",
1147 			     nic->vf_id, qidx, ret);
1148 
1149 	other_error = ret;
1150 	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
1151 	nicvf_rx_queue_release_mbufs(dev, rxq);
1152 	nicvf_rx_queue_reset(rxq);
1153 
1154 	ret = nicvf_qset_cq_reclaim(nic, qidx);
1155 	if (ret)
1156 		PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d",
1157 			     nic->vf_id, qidx, ret);
1158 
1159 	other_error |= ret;
1160 	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1161 		RTE_ETH_QUEUE_STATE_STOPPED;
1162 	return other_error;
1163 }
1164 
1165 static void
1166 nicvf_dev_rx_queue_release(void *rx_queue)
1167 {
1168 	PMD_INIT_FUNC_TRACE();
1169 
1170 	rte_free(rx_queue);
1171 }
1172 
1173 static int
1174 nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1175 {
1176 	struct nicvf *nic = nicvf_pmd_priv(dev);
1177 	int ret;
1178 
1179 	if (qidx >= MAX_RCV_QUEUES_PER_QS)
1180 		nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
1181 
1182 	qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1183 
1184 	ret = nicvf_vf_start_rx_queue(dev, nic, qidx);
1185 	if (ret)
1186 		return ret;
1187 
1188 	ret = nicvf_configure_cpi(dev);
1189 	if (ret)
1190 		return ret;
1191 
1192 	return nicvf_configure_rss_reta(dev);
1193 }
1194 
1195 static int
1196 nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1197 {
1198 	int ret;
1199 	struct nicvf *nic = nicvf_pmd_priv(dev);
1200 
1201 	if (qidx >= MAX_RCV_QUEUES_PER_QS)
1202 		nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
1203 
1204 	qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1205 
1206 	ret = nicvf_vf_stop_rx_queue(dev, nic, qidx);
1207 	ret |= nicvf_configure_cpi(dev);
1208 	ret |= nicvf_configure_rss_reta(dev);
1209 	return ret;
1210 }
1211 
1212 static int
1213 nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1214 {
1215 	struct nicvf *nic = nicvf_pmd_priv(dev);
1216 
1217 	if (qidx >= MAX_SND_QUEUES_PER_QS)
1218 		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1219 
1220 	qidx = qidx % MAX_SND_QUEUES_PER_QS;
1221 
1222 	return nicvf_vf_start_tx_queue(dev, nic, qidx);
1223 }
1224 
1225 static int
1226 nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1227 {
1228 	struct nicvf *nic = nicvf_pmd_priv(dev);
1229 
1230 	if (qidx >= MAX_SND_QUEUES_PER_QS)
1231 		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1232 
1233 	qidx = qidx % MAX_SND_QUEUES_PER_QS;
1234 
1235 	return nicvf_vf_stop_tx_queue(dev, nic, qidx);
1236 }
1237 
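/*
 * Precompute the 64-bit mbuf rearm template (data_off, refcnt, nb_segs,
 * port) so that the Rx path can initialize each received mbuf with a
 * single 8-byte store instead of writing the fields individually.
 */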
1238 static inline void
1239 nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
1240 {
1241 	uintptr_t p;
1242 	struct rte_mbuf mb_def;
1243 
1244 	RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
1245 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
1246 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
1247 				offsetof(struct rte_mbuf, data_off) != 2);
1248 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
1249 				offsetof(struct rte_mbuf, data_off) != 4);
1250 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
1251 				offsetof(struct rte_mbuf, data_off) != 6);
1252 	mb_def.nb_segs = 1;
1253 	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
1254 	mb_def.port = rxq->port_id;
1255 	rte_mbuf_refcnt_set(&mb_def, 1);
1256 
1257 	/* Prevent compiler reordering: rearm_data covers previous fields */
1258 	rte_compiler_barrier();
1259 	p = (uintptr_t)&mb_def.rearm_data;
1260 	rxq->mbuf_initializer.value = *(uint64_t *)p;
1261 }
1262 
1263 static int
1264 nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
1265 			 uint16_t nb_desc, unsigned int socket_id,
1266 			 const struct rte_eth_rxconf *rx_conf,
1267 			 struct rte_mempool *mp)
1268 {
1269 	uint16_t rx_free_thresh;
1270 	struct nicvf_rxq *rxq;
1271 	struct nicvf *nic = nicvf_pmd_priv(dev);
1272 	uint64_t conf_offloads, offload_capa, unsupported_offloads;
1273 
1274 	PMD_INIT_FUNC_TRACE();
1275 
1276 	if (qidx >= MAX_RCV_QUEUES_PER_QS)
1277 		nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1];
1278 
1279 	qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1280 
1281 	/* Socket id check */
1282 	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
1283 		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
1284 		socket_id, nic->node);
1285 
1286 
1287 	conf_offloads = rx_conf->offloads;
1288 
1289 	if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) {
1290 		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
1291 		conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
1292 	}
1293 
1294 	offload_capa = NICVF_RX_OFFLOAD_CAPA;
1295 	unsupported_offloads = conf_offloads & ~offload_capa;
1296 
1297 	if (unsupported_offloads) {
1298 		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
1299 		      "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
1300 		      unsupported_offloads, conf_offloads, offload_capa);
1301 		return -ENOTSUP;
1302 	}
1303 
1304 	/* Mempool memory must be contiguous, so it must be one memory segment */
1305 	if (mp->nb_mem_chunks != 1) {
1306 		PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
1307 		return -EINVAL;
1308 	}
1309 
1310 	/* Mempool memory must be physically contiguous */
1311 	if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
1312 		PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
1313 		return -EINVAL;
1314 	}
1315 
1316 	/* Rx deferred start is not supported */
1317 	if (rx_conf->rx_deferred_start) {
1318 		PMD_INIT_LOG(ERR, "Rx deferred start not supported");
1319 		return -EINVAL;
1320 	}
1321 
1322 	/* Roundup nb_desc to available qsize and validate max number of desc */
1323 	nb_desc = nicvf_qsize_cq_roundup(nb_desc);
1324 	if (nb_desc == 0) {
1325 		PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize");
1326 		return -EINVAL;
1327 	}
1328 
1329 	/* Check rx_free_thresh upper bound */
1330 	rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
1331 				rx_conf->rx_free_thresh :
1332 				NICVF_DEFAULT_RX_FREE_THRESH);
1333 	if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
1334 		rx_free_thresh >= nb_desc * .75) {
1335 		PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
1336 				rx_free_thresh);
1337 		return -EINVAL;
1338 	}
1339 
1340 	/* Free memory prior to re-allocation if needed */
1341 	if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
1342 		PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
1343 				nicvf_netdev_qidx(nic, qidx));
1344 		nicvf_dev_rx_queue_release(
1345 			dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);
1346 		dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
1347 	}
1348 
1349 	/* Allocate rxq memory */
1350 	rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
1351 					RTE_CACHE_LINE_SIZE, nic->node);
1352 	if (rxq == NULL) {
1353 		PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d",
1354 			     nicvf_netdev_qidx(nic, qidx));
1355 		return -ENOMEM;
1356 	}
1357 
1358 	rxq->nic = nic;
1359 	rxq->pool = mp;
1360 	rxq->queue_id = qidx;
1361 	rxq->port_id = dev->data->port_id;
1362 	rxq->rx_free_thresh = rx_free_thresh;
1363 	rxq->rx_drop_en = rx_conf->rx_drop_en;
1364 	rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
1365 	rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
1366 	rxq->precharge_cnt = 0;
1367 
1368 	if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2)
1369 		rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD;
1370 	else
1371 		rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
1372 
1373 	nicvf_rxq_mbuf_setup(rxq);
1374 
1375 	/* Alloc completion queue */
1376 	if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
1377 		PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
1378 		nicvf_dev_rx_queue_release(rxq);
1379 		return -ENOMEM;
1380 	}
1381 
1382 	nicvf_rx_queue_reset(rxq);
1383 
1384 	PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
1385 			" phy=0x%" PRIx64 " offloads=0x%" PRIx64,
1386 			nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
1387 			rte_mempool_avail_count(mp), rxq->phys, conf_offloads);
1388 
1389 	dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
1390 	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1391 		RTE_ETH_QUEUE_STATE_STOPPED;
1392 	return 0;
1393 }
1394 
1395 static void
1396 nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1397 {
1398 	struct nicvf *nic = nicvf_pmd_priv(dev);
1399 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1400 
1401 	PMD_INIT_FUNC_TRACE();
1402 
1403 	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1404 
1405 	/* Autonegotiation may be disabled */
1406 	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
1407 	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
1408 				 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1409 	if (nicvf_hw_version(nic) != PCI_SUB_DEVICE_ID_CN81XX_NICVF)
1410 		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
1411 
1412 	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
1413 	dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
1414 	dev_info->max_rx_queues =
1415 			(uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
1416 	dev_info->max_tx_queues =
1417 			(uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
1418 	dev_info->max_mac_addrs = 1;
1419 	dev_info->max_vfs = pci_dev->max_vfs;
1420 
1421 	dev_info->rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
1422 	dev_info->tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
1423 	dev_info->rx_queue_offload_capa = NICVF_RX_OFFLOAD_CAPA;
1424 	dev_info->tx_queue_offload_capa = NICVF_TX_OFFLOAD_CAPA;
1425 
1426 	dev_info->reta_size = nic->rss_info.rss_size;
1427 	dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
1428 	dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
1429 	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
1430 		dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;
1431 
1432 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1433 		.rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
1434 		.rx_drop_en = 0,
1435 		.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
1436 	};
1437 
1438 	dev_info->default_txconf = (struct rte_eth_txconf) {
1439 		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
1440 		.txq_flags =
1441 			ETH_TXQ_FLAGS_NOMULTSEGS  |
1442 			ETH_TXQ_FLAGS_NOREFCOUNT  |
1443 			ETH_TXQ_FLAGS_NOMULTMEMP  |
1444 			ETH_TXQ_FLAGS_NOVLANOFFL  |
1445 			ETH_TXQ_FLAGS_NOXSUMSCTP,
1446 		.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
1447 			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
1448 			DEV_TX_OFFLOAD_UDP_CKSUM          |
1449 			DEV_TX_OFFLOAD_TCP_CKSUM,
1450 	};
1451 }
1452 
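/*
 * Buffer allocation callback used while precharging the RBDR: walk this
 * VF's Rx queues and take one mbuf from the first pool that has not yet
 * reached its per-queue precharge limit, returning the buffer's IOVA.
 */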
1453 static nicvf_iova_addr_t
1454 rbdr_rte_mempool_get(void *dev, void *opaque)
1455 {
1456 	uint16_t qidx;
1457 	uintptr_t mbuf;
1458 	struct nicvf_rxq *rxq;
1459 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
1460 	struct nicvf *nic = (struct nicvf *)opaque;
1461 	uint16_t rx_start, rx_end;
1462 
1463 	/* Get queue ranges for this VF */
1464 	nicvf_rx_range(eth_dev, nic, &rx_start, &rx_end);
1465 
1466 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
1467 		rxq = eth_dev->data->rx_queues[qidx];
1468 		/* Maintain equal buffer count across all pools */
1469 		if (rxq->precharge_cnt >= rxq->qlen_mask)
1470 			continue;
1471 		rxq->precharge_cnt++;
1472 		mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
1473 		if (mbuf)
1474 			return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
1475 	}
1476 	return 0;
1477 }
1478 
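/*
 * Bring up one VF's queue set: validate that all Rx mempools share the
 * same buffer layout, allocate and precharge the RBDR, start the Tx and
 * Rx queues, and (for the primary VF only) configure CPI and RSS.
 */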
1479 static int
1480 nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
1481 {
1482 	int ret;
1483 	uint16_t qidx, data_off;
1484 	uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
1485 	uint64_t mbuf_phys_off = 0;
1486 	struct nicvf_rxq *rxq;
1487 	struct rte_mbuf *mbuf;
1488 	uint16_t rx_start, rx_end;
1489 	uint16_t tx_start, tx_end;
1490 	bool vlan_strip;
1491 
1492 	PMD_INIT_FUNC_TRACE();
1493 
1494 	/* Userspace process exited without proper shutdown in last run */
1495 	if (nicvf_qset_rbdr_active(nic, 0))
1496 		nicvf_vf_stop(dev, nic, false);
1497 
1498 	/* Get queue ranges for this VF */
1499 	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
1500 
1501 	/*
1502 	 * The thunderx nicvf PMD can support more than one pool per port only when
1503 	 * 1) the data payload size is the same across all the pools in a given port
1504 	 * AND
1505 	 * 2) all mbufs in the pools are from the same hugepage
1506 	 * AND
1507 	 * 3) the mbuf metadata size is the same across all the pools in a given port.
1508 	 *
1509 	 * This is to support existing applications that use multiple pools per port.
1510 	 * However, using multiple pools for QoS purposes is not addressed.
1511 	 *
1512 	 */
1513 
1514 	/* Validate mempool attributes */
1515 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
1516 		rxq = dev->data->rx_queues[qidx];
1517 		rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
1518 		mbuf = rte_pktmbuf_alloc(rxq->pool);
1519 		if (mbuf == NULL) {
1520 			PMD_INIT_LOG(ERR, "Failed to allocate mbuf VF%d qid=%d "
1521 				     "pool=%s",
1522 				     nic->vf_id, qidx, rxq->pool->name);
1523 			return -ENOMEM;
1524 		}
1525 		data_off = nicvf_mbuff_meta_length(mbuf);
1526 		data_off += RTE_PKTMBUF_HEADROOM;
1527 		rte_pktmbuf_free(mbuf);
1528 
1529 		if (data_off % RTE_CACHE_LINE_SIZE) {
1530 			PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d",
1531 				rxq->pool->name, data_off,
1532 				data_off % RTE_CACHE_LINE_SIZE);
1533 			return -EINVAL;
1534 		}
1535 		rxq->mbuf_phys_off -= data_off;
1536 
1537 		if (mbuf_phys_off == 0)
1538 			mbuf_phys_off = rxq->mbuf_phys_off;
1539 		if (mbuf_phys_off != rxq->mbuf_phys_off) {
1540 			PMD_INIT_LOG(ERR, "pool params not same, %s VF%d %"
1541 				     PRIx64, rxq->pool->name, nic->vf_id,
1542 				     mbuf_phys_off);
1543 			return -EINVAL;
1544 		}
1545 	}
1546 
1547 	/* Check the level of buffers in the pool */
1548 	total_rxq_desc = 0;
1549 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
1550 		rxq = dev->data->rx_queues[qidx];
1551 		/* Count total numbers of rxq descs */
1552 		total_rxq_desc += rxq->qlen_mask + 1;
1553 		exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
1554 		exp_buffs *= dev->data->nb_rx_queues;
1555 		if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
1556 			PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
1557 				     rxq->pool->name,
1558 				     rte_mempool_avail_count(rxq->pool),
1559 				     exp_buffs);
1560 			return -ENOENT;
1561 		}
1562 	}
1563 
1564 	/* Check RBDR desc overflow */
1565 	ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
1566 	if (ret == 0) {
1567 		PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc "
1568 			     "VF%d", nic->vf_id);
1569 		return -ENOMEM;
1570 	}
1571 
1572 	/* Enable qset */
1573 	ret = nicvf_qset_config(nic);
1574 	if (ret) {
1575 		PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret,
1576 			     nic->vf_id);
1577 		return ret;
1578 	}
1579 
1580 	/* Allocate RBDR and RBDR ring desc */
1581 	nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
1582 	ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz);
1583 	if (ret) {
1584 		PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc "
1585 			     "VF%d", nic->vf_id);
1586 		goto qset_reclaim;
1587 	}
1588 
1589 	/* Enable and configure RBDR registers */
1590 	ret = nicvf_qset_rbdr_config(nic, 0);
1591 	if (ret) {
1592 		PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret,
1593 			     nic->vf_id);
1594 		goto qset_rbdr_free;
1595 	}
1596 
1597 	/* Fill rte_mempool buffers in RBDR pool and precharge it */
1598 	ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
1599 					total_rxq_desc);
1600 	if (ret) {
1601 		PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret,
1602 			     nic->vf_id);
1603 		goto qset_rbdr_reclaim;
1604 	}
1605 
1606 	PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d",
1607 		     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
1608 
1609 	/* Configure VLAN Strip */
1610 	vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
1611 			DEV_RX_OFFLOAD_VLAN_STRIP);
1612 	nicvf_vlan_hw_strip(nic, vlan_strip);
1613 
1614 	/* Based on the packet type (IPv4 or IPv6), the nicvf HW aligns L3 data
1615 	 * to a 64bit memory address.
1616 	 * The alignment creates a hole in the mbuf (between the end of headroom
1617 	 * and the packet data start). Newer revisions of the HW provide an
1618 	 * option to disable the L3 alignment feature and make the mbuf layout
1619 	 * look more like other NICs. For better application compatibility,
1620 	 * disable the L3 alignment feature on hardware revisions that support it.
1621 	 */
1622 	nicvf_apad_config(nic, false);
1623 
1624 	/* Get queue ranges for this VF */
1625 	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
1626 
1627 	/* Configure TX queues */
1628 	for (qidx = tx_start; qidx <= tx_end; qidx++) {
1629 		ret = nicvf_vf_start_tx_queue(dev, nic,
1630 			qidx % MAX_SND_QUEUES_PER_QS);
1631 		if (ret)
1632 			goto start_txq_error;
1633 	}
1634 
1635 	/* Configure RX queues */
1636 	for (qidx = rx_start; qidx <= rx_end; qidx++) {
1637 		ret = nicvf_vf_start_rx_queue(dev, nic,
1638 			qidx % MAX_RCV_QUEUES_PER_QS);
1639 		if (ret)
1640 			goto start_rxq_error;
1641 	}
1642 
1643 	if (!nic->sqs_mode) {
1644 		/* Configure CPI algorithm */
1645 		ret = nicvf_configure_cpi(dev);
1646 		if (ret)
1647 			goto start_txq_error;
1648 
1649 		ret = nicvf_mbox_get_rss_size(nic);
1650 		if (ret) {
1651 			PMD_INIT_LOG(ERR, "Failed to get rss table size");
1652 			goto qset_rss_error;
1653 		}
1654 
1655 		/* Configure RSS */
1656 		ret = nicvf_configure_rss(dev);
1657 		if (ret)
1658 			goto qset_rss_error;
1659 	}
1660 
1661 	/* Done; Let PF make the BGX's RX and TX switches to ON position */
1662 	nicvf_mbox_cfg_done(nic);
1663 	return 0;
1664 
1665 qset_rss_error:
1666 	nicvf_rss_term(nic);
1667 start_rxq_error:
1668 	for (qidx = rx_start; qidx <= rx_end; qidx++)
1669 		nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
1670 start_txq_error:
1671 	for (qidx = tx_start; qidx <= tx_end; qidx++)
1672 		nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
1673 qset_rbdr_reclaim:
1674 	nicvf_qset_rbdr_reclaim(nic, 0);
1675 	nicvf_rbdr_release_mbufs(dev, nic);
1676 qset_rbdr_free:
1677 	if (nic->rbdr) {
1678 		rte_free(nic->rbdr);
1679 		nic->rbdr = NULL;
1680 	}
1681 qset_reclaim:
1682 	nicvf_qset_reclaim(nic);
1683 	return ret;
1684 }
1685 
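/*
 * Port start: every Rx mempool must provide the same, 128-byte aligned
 * buffer size (one RBDR buffer size per port), scattered Rx and the MTU
 * are derived from max_rx_pkt_len, and then each VF's queue set is
 * started before the Rx/Tx burst handlers are selected.
 */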
1686 static int
1687 nicvf_dev_start(struct rte_eth_dev *dev)
1688 {
1689 	uint16_t qidx;
1690 	int ret;
1691 	size_t i;
1692 	struct nicvf *nic = nicvf_pmd_priv(dev);
1693 	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
1694 	uint16_t mtu;
1695 	uint32_t buffsz = 0, rbdrsz = 0;
1696 	struct rte_pktmbuf_pool_private *mbp_priv;
1697 	struct nicvf_rxq *rxq;
1698 
1699 	PMD_INIT_FUNC_TRACE();
1700 
1701 	/* This function must be called for a primary device */
1702 	assert_primary(nic);
1703 
1704 	/* Validate RBDR buff size */
1705 	for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
1706 		rxq = dev->data->rx_queues[qidx];
1707 		mbp_priv = rte_mempool_get_priv(rxq->pool);
1708 		buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
1709 		if (buffsz % 128) {
1710 			PMD_INIT_LOG(ERR, "rxbuf size must be a multiple of 128");
1711 			return -EINVAL;
1712 		}
1713 		if (rbdrsz == 0)
1714 			rbdrsz = buffsz;
1715 		if (rbdrsz != buffsz) {
1716 			PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)",
1717 				     qidx, rbdrsz, buffsz);
1718 			return -EINVAL;
1719 		}
1720 	}
1721 
1722 	/* Configure loopback */
1723 	ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
1724 	if (ret) {
1725 		PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
1726 		return ret;
1727 	}
1728 
1729 	/* Reset all statistics counters attached to this port */
1730 	ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
1731 	if (ret) {
1732 		PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
1733 		return ret;
1734 	}
1735 
1736 	/* Setup scatter mode if needed by jumbo */
1737 	if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
1738 					    2 * VLAN_TAG_SIZE > buffsz)
1739 		dev->data->scattered_rx = 1;
1740 	if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
1741 		dev->data->scattered_rx = 1;
1742 
1743 	/* Setup MTU based on max_rx_pkt_len or default */
1744 	mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
1745 		dev->data->dev_conf.rxmode.max_rx_pkt_len
1746 			-  ETHER_HDR_LEN - ETHER_CRC_LEN
1747 		: ETHER_MTU;
1748 
1749 	if (nicvf_dev_set_mtu(dev, mtu)) {
1750 		PMD_INIT_LOG(ERR, "Failed to set default mtu size");
1751 		return -EBUSY;
1752 	}
1753 
1754 	ret = nicvf_vf_start(dev, nic, rbdrsz);
1755 	if (ret != 0)
1756 		return ret;
1757 
1758 	for (i = 0; i < nic->sqs_count; i++) {
1759 		assert(nic->snicvf[i]);
1760 
1761 		ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz);
1762 		if (ret != 0)
1763 			return ret;
1764 	}
1765 
1766 	/* Configure callbacks based on scatter mode */
1767 	nicvf_set_tx_function(dev);
1768 	nicvf_set_rx_function(dev);
1769 
1770 	return 0;
1771 }
1772 
1773 static void
1774 nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
1775 {
1776 	size_t i;
1777 	int ret;
1778 	struct nicvf *nic = nicvf_pmd_priv(dev);
1779 
1780 	PMD_INIT_FUNC_TRACE();
1781 
1782 	/* Teardown secondary vf first */
1783 	for (i = 0; i < nic->sqs_count; i++) {
1784 		if (!nic->snicvf[i])
1785 			continue;
1786 
1787 		nicvf_vf_stop(dev, nic->snicvf[i], cleanup);
1788 	}
1789 
1790 	/* Stop the primary VF now */
1791 	nicvf_vf_stop(dev, nic, cleanup);
1792 
1793 	/* Disable loopback */
1794 	ret = nicvf_loopback_config(nic, 0);
1795 	if (ret)
1796 		PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);
1797 
1798 	/* Reclaim CPI configuration */
1799 	ret = nicvf_mbox_config_cpi(nic, 0);
1800 	if (ret)
1801 		PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
1802 }
1803 
1804 static void
1805 nicvf_dev_stop(struct rte_eth_dev *dev)
1806 {
1807 	PMD_INIT_FUNC_TRACE();
1808 
1809 	nicvf_dev_stop_cleanup(dev, false);
1810 }
1811 
1812 static void
1813 nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
1814 {
1815 	int ret;
1816 	uint16_t qidx;
1817 	uint16_t tx_start, tx_end;
1818 	uint16_t rx_start, rx_end;
1819 
1820 	PMD_INIT_FUNC_TRACE();
1821 
1822 	if (cleanup) {
1823 		/* Let PF make the BGX's RX and TX switches to OFF position */
1824 		nicvf_mbox_shutdown(nic);
1825 	}
1826 
1827 	/* Disable VLAN Strip */
1828 	nicvf_vlan_hw_strip(nic, 0);
1829 
1830 	/* Get queue ranges for this VF */
1831 	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
1832 
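	/* Queue indexes are global; modulo maps them to per-Qset indexes */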
1833 	for (qidx = tx_start; qidx <= tx_end; qidx++)
1834 		nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
1835 
1836 	/* Get queue ranges for this VF */
1837 	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
1838 
1839 	/* Reclaim rq */
1840 	for (qidx = rx_start; qidx <= rx_end; qidx++)
1841 		nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
1842 
1843 	/* Reclaim RBDR */
1844 	ret = nicvf_qset_rbdr_reclaim(nic, 0);
1845 	if (ret)
1846 		PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);
1847 
1848 	/* Move all charged buffers in RBDR back to pool */
1849 	if (nic->rbdr != NULL)
1850 		nicvf_rbdr_release_mbufs(dev, nic);
1851 
1852 	/* Disable qset */
1853 	ret = nicvf_qset_reclaim(nic);
1854 	if (ret)
1855 		PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);
1856 
1857 	/* Disable all interrupts */
1858 	nicvf_disable_all_interrupts(nic);
1859 
1860 	/* Free RBDR SW structure */
1861 	if (nic->rbdr) {
1862 		rte_free(nic->rbdr);
1863 		nic->rbdr = NULL;
1864 	}
1865 }
1866 
1867 static void
1868 nicvf_dev_close(struct rte_eth_dev *dev)
1869 {
1870 	size_t i;
1871 	struct nicvf *nic = nicvf_pmd_priv(dev);
1872 
1873 	PMD_INIT_FUNC_TRACE();
1874 
1875 	nicvf_dev_stop_cleanup(dev, true);
1876 	nicvf_periodic_alarm_stop(nicvf_interrupt, dev);
1877 
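	/* Stop the poll alarms of the secondary Qset VFs */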
1878 	for (i = 0; i < nic->sqs_count; i++) {
1879 		if (!nic->snicvf[i])
1880 			continue;
1881 
1882 		nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
1883 	}
1884 }
1885 
1886 static int
1887 nicvf_request_sqs(struct nicvf *nic)
1888 {
1889 	size_t i;
1890 
1891 	assert_primary(nic);
1892 	assert(nic->sqs_count > 0);
1893 	assert(nic->sqs_count <= MAX_SQS_PER_VF);
1894 
1895 	/* Assign a secondary Qset VF to each required SQS slot */
1896 	for (i = 0; i < nic->sqs_count; i++) {
1897 		if (nicvf_svf_empty())
1898 			rte_panic("Cannot assign sufficient number of "
1899 				  "secondary queues to primary VF%" PRIu8 "\n",
1900 				  nic->vf_id);
1901 
1902 		nic->snicvf[i] = nicvf_svf_pop();
1903 		nic->snicvf[i]->sqs_id = i;
1904 	}
1905 
1906 	return nicvf_mbox_request_sqs(nic);
1907 }
1908 
1909 static int
1910 nicvf_dev_configure(struct rte_eth_dev *dev)
1911 {
1912 	struct rte_eth_dev_data *data = dev->data;
1913 	struct rte_eth_conf *conf = &data->dev_conf;
1914 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
1915 	struct rte_eth_txmode *txmode = &conf->txmode;
1916 	struct nicvf *nic = nicvf_pmd_priv(dev);
1917 	uint8_t cqcount;
1918 	uint64_t conf_rx_offloads, rx_offload_capa;
1919 	uint64_t conf_tx_offloads, tx_offload_capa;
1920 
1921 	PMD_INIT_FUNC_TRACE();
1922 
1923 	if (!rte_eal_has_hugepages()) {
1924 		PMD_INIT_LOG(INFO, "Huge pages are not configured");
1925 		return -EINVAL;
1926 	}
1927 
1928 	conf_tx_offloads = dev->data->dev_conf.txmode.offloads;
1929 	tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
1930 
1931 	if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
1932 		PMD_INIT_LOG(ERR, "Some Tx offloads are not supported, "
1933 		      "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
1934 		      conf_tx_offloads, tx_offload_capa);
1935 		return -ENOTSUP;
1936 	}
1937 
1938 	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
1939 		PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
1940 		rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
1941 	}
1942 
1943 	conf_rx_offloads = rxmode->offloads;
1944 	rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
1945 
1946 	if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) {
1947 		PMD_INIT_LOG(ERR, "Some Rx offloads are not supported, "
1948 		      "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
1949 		      conf_rx_offloads, rx_offload_capa);
1950 		return -ENOTSUP;
1951 	}
1952 
1953 	if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
1954 		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
1955 		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
1956 	}
1957 
1958 	if (txmode->mq_mode) {
1959 		PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
1960 		return -EINVAL;
1961 	}
1962 
1963 	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
1964 		rxmode->mq_mode != ETH_MQ_RX_RSS) {
1965 		PMD_INIT_LOG(INFO, "Unsupported Rx mq_mode %d", rxmode->mq_mode);
1966 		return -EINVAL;
1967 	}
1968 
1969 	if (rxmode->split_hdr_size) {
1970 		PMD_INIT_LOG(INFO, "Rxmode does not support split header");
1971 		return -EINVAL;
1972 	}
1973 
1974 	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
1975 		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
1976 		return -EINVAL;
1977 	}
1978 
1979 	if (conf->dcb_capability_en) {
1980 		PMD_INIT_LOG(INFO, "DCB enable not supported");
1981 		return -EINVAL;
1982 	}
1983 
1984 	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1985 		PMD_INIT_LOG(INFO, "Flow director not supported");
1986 		return -EINVAL;
1987 	}
1988 
1989 	assert_primary(nic);
1990 	NICVF_STATIC_ASSERT(MAX_RCV_QUEUES_PER_QS == MAX_SND_QUEUES_PER_QS);
1991 	cqcount = RTE_MAX(data->nb_tx_queues, data->nb_rx_queues);
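	/* Queues beyond one Qset's capacity are served by secondary
	 * Qsets (SQS); round up and exclude the primary Qset itself */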
1992 	if (cqcount > MAX_RCV_QUEUES_PER_QS) {
1993 		nic->sqs_count = RTE_ALIGN_CEIL(cqcount, MAX_RCV_QUEUES_PER_QS);
1994 		nic->sqs_count = (nic->sqs_count / MAX_RCV_QUEUES_PER_QS) - 1;
1995 	} else {
1996 		nic->sqs_count = 0;
1997 	}
1998 
1999 	assert(nic->sqs_count <= MAX_SQS_PER_VF);
2000 
2001 	if (nic->sqs_count > 0) {
2002 		if (nicvf_request_sqs(nic)) {
2003 			rte_panic("Cannot assign sufficient number of "
2004 				  "secondary queues to PORT%d VF%" PRIu8 "\n",
2005 				  dev->data->port_id, nic->vf_id);
2006 		}
2007 	}
2008 
2009 	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
2010 		dev->data->port_id, nicvf_hw_cap(nic));
2011 
2012 	return 0;
2013 }
2014 
2015 /* Initialize and register driver with DPDK Application */
2016 static const struct eth_dev_ops nicvf_eth_dev_ops = {
2017 	.dev_configure            = nicvf_dev_configure,
2018 	.dev_start                = nicvf_dev_start,
2019 	.dev_stop                 = nicvf_dev_stop,
2020 	.link_update              = nicvf_dev_link_update,
2021 	.dev_close                = nicvf_dev_close,
2022 	.stats_get                = nicvf_dev_stats_get,
2023 	.stats_reset              = nicvf_dev_stats_reset,
2024 	.promiscuous_enable       = nicvf_dev_promisc_enable,
2025 	.dev_infos_get            = nicvf_dev_info_get,
2026 	.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
2027 	.mtu_set                  = nicvf_dev_set_mtu,
2028 	.reta_update              = nicvf_dev_reta_update,
2029 	.reta_query               = nicvf_dev_reta_query,
2030 	.rss_hash_update          = nicvf_dev_rss_hash_update,
2031 	.rss_hash_conf_get        = nicvf_dev_rss_hash_conf_get,
2032 	.rx_queue_start           = nicvf_dev_rx_queue_start,
2033 	.rx_queue_stop            = nicvf_dev_rx_queue_stop,
2034 	.tx_queue_start           = nicvf_dev_tx_queue_start,
2035 	.tx_queue_stop            = nicvf_dev_tx_queue_stop,
2036 	.rx_queue_setup           = nicvf_dev_rx_queue_setup,
2037 	.rx_queue_release         = nicvf_dev_rx_queue_release,
2038 	.rx_queue_count           = nicvf_dev_rx_queue_count,
2039 	.tx_queue_setup           = nicvf_dev_tx_queue_setup,
2040 	.tx_queue_release         = nicvf_dev_tx_queue_release,
2041 	.get_reg                  = nicvf_dev_get_regs,
2042 };
2043 
2044 static int
2045 nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
2046 {
2047 	int ret;
2048 	struct rte_pci_device *pci_dev;
2049 	struct nicvf *nic = nicvf_pmd_priv(eth_dev);
2050 
2051 	PMD_INIT_FUNC_TRACE();
2052 
2053 	eth_dev->dev_ops = &nicvf_eth_dev_ops;
2054 
2055 	/* For secondary processes, the primary has done all the work */
2056 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2057 		if (nic) {
2058 			/* Setup callbacks for secondary process */
2059 			nicvf_set_tx_function(eth_dev);
2060 			nicvf_set_rx_function(eth_dev);
2061 			return 0;
2062 		} else {
2063 			/* If nic == NULL then this is a secondary Qset VF,
2064 			 * so the ethdev needs to be released by the caller */
2065 			return ENOTSUP;
2066 		}
2067 	}
2068 
2069 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2070 	rte_eth_copy_pci_info(eth_dev, pci_dev);
2071 
2072 	nic->device_id = pci_dev->id.device_id;
2073 	nic->vendor_id = pci_dev->id.vendor_id;
2074 	nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
2075 	nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2076 
2077 	PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
2078 			pci_dev->id.vendor_id, pci_dev->id.device_id,
2079 			pci_dev->addr.domain, pci_dev->addr.bus,
2080 			pci_dev->addr.devid, pci_dev->addr.function);
2081 
2082 	nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
2083 	if (!nic->reg_base) {
2084 		PMD_INIT_LOG(ERR, "Failed to map BAR0");
2085 		ret = -ENODEV;
2086 		goto fail;
2087 	}
2088 
2089 	nicvf_disable_all_interrupts(nic);
2090 
2091 	ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
2092 	if (ret) {
2093 		PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
2094 		goto fail;
2095 	}
2096 
2097 	ret = nicvf_mbox_check_pf_ready(nic);
2098 	if (ret) {
2099 		PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
2100 		goto alarm_fail;
2101 	} else {
2102 		PMD_INIT_LOG(INFO,
2103 			"node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
2104 			nic->node, nic->vf_id,
2105 			nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
2106 			nic->sqs_mode ? "true" : "false",
2107 			nic->loopback_supported ? "true" : "false"
2108 			);
2109 	}
2110 
2111 	ret = nicvf_base_init(nic);
2112 	if (ret) {
2113 		PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
2114 		goto malloc_fail;
2115 	}
2116 
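	/* An SQS-mode VF only supplies extra queues to a primary VF */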
2117 	if (nic->sqs_mode) {
2118 		/* Push nic to stack of secondary vfs */
2119 		nicvf_svf_push(nic);
2120 
2121 		/* Steal nic pointer from the device for further reuse */
2122 		eth_dev->data->dev_private = NULL;
2123 
2124 		nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
2125 		ret = nicvf_periodic_alarm_start(nicvf_vf_interrupt, nic);
2126 		if (ret) {
2127 			PMD_INIT_LOG(ERR, "Failed to start periodic alarm");
2128 			goto fail;
2129 		}
2130 
2131 		/* Detach port by returning positive error number */
2132 		return ENOTSUP;
2133 	}
2134 
2135 	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
2136 	if (eth_dev->data->mac_addrs == NULL) {
2137 		PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
2138 		ret = -ENOMEM;
2139 		goto alarm_fail;
2140 	}
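	/* Generate a random locally administered MAC if the PF gave none */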
2141 	if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr))
2142 		eth_random_addr(&nic->mac_addr[0]);
2143 
2144 	ether_addr_copy((struct ether_addr *)nic->mac_addr,
2145 			&eth_dev->data->mac_addrs[0]);
2146 
2147 	ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
2148 	if (ret) {
2149 		PMD_INIT_LOG(ERR, "Failed to set mac addr");
2150 		goto malloc_fail;
2151 	}
2152 
2153 	PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
2154 		eth_dev->data->port_id, nic->vendor_id, nic->device_id,
2155 		nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
2156 		nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);
2157 
2158 	return 0;
2159 
2160 malloc_fail:
2161 	rte_free(eth_dev->data->mac_addrs);
2162 alarm_fail:
2163 	nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
2164 fail:
2165 	return ret;
2166 }
2167 
2168 static const struct rte_pci_id pci_id_nicvf_map[] = {
2169 	{
2170 		.class_id = RTE_CLASS_ANY_ID,
2171 		.vendor_id = PCI_VENDOR_ID_CAVIUM,
2172 		.device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
2173 		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2174 		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
2175 	},
2176 	{
2177 		.class_id = RTE_CLASS_ANY_ID,
2178 		.vendor_id = PCI_VENDOR_ID_CAVIUM,
2179 		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2180 		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2181 		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
2182 	},
2183 	{
2184 		.class_id = RTE_CLASS_ANY_ID,
2185 		.vendor_id = PCI_VENDOR_ID_CAVIUM,
2186 		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2187 		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2188 		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
2189 	},
2190 	{
2191 		.class_id = RTE_CLASS_ANY_ID,
2192 		.vendor_id = PCI_VENDOR_ID_CAVIUM,
2193 		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2194 		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2195 		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN83XX_NICVF,
2196 	},
2197 	{
2198 		.vendor_id = 0,
2199 	},
2200 };
2201 
2202 static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2203 	struct rte_pci_device *pci_dev)
2204 {
2205 	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct nicvf),
2206 		nicvf_eth_dev_init);
2207 }
2208 
2209 static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
2210 {
2211 	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
2212 }
2213 
2214 static struct rte_pci_driver rte_nicvf_pmd = {
2215 	.id_table = pci_id_nicvf_map,
2216 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_KEEP_MAPPED_RES |
2217 			RTE_PCI_DRV_INTR_LSC,
2218 	.probe = nicvf_eth_pci_probe,
2219 	.remove = nicvf_eth_pci_remove,
2220 };
2221 
2222 RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
2223 RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
2224 RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio-pci");
2225