xref: /dpdk/drivers/net/ntnic/ntnic_ethdev.c (revision 6019656d6f6848c83591f24867538311545776eb)
1c5cfe765SSerhii Iliushyk /*
2c5cfe765SSerhii Iliushyk  * SPDX-License-Identifier: BSD-3-Clause
3c5cfe765SSerhii Iliushyk  * Copyright(c) 2023 Napatech A/S
4c5cfe765SSerhii Iliushyk  */
5c5cfe765SSerhii Iliushyk 
6fe91ade9SDanylo Vodopianov #include <stdint.h>
7a1ba8c47SDanylo Vodopianov #include <stdarg.h>
8a1ba8c47SDanylo Vodopianov 
9a1ba8c47SDanylo Vodopianov #include <signal.h>
10fe91ade9SDanylo Vodopianov 
11c1c13953SSerhii Iliushyk #include <rte_eal.h>
12c1c13953SSerhii Iliushyk #include <rte_dev.h>
1378b8b4abSSerhii Iliushyk #include <rte_vfio.h>
14c5cfe765SSerhii Iliushyk #include <rte_ethdev.h>
15c5cfe765SSerhii Iliushyk #include <rte_bus_pci.h>
16c5cfe765SSerhii Iliushyk #include <ethdev_pci.h>
17fe91ade9SDanylo Vodopianov #include <rte_kvargs.h>
18fe91ade9SDanylo Vodopianov 
19fe91ade9SDanylo Vodopianov #include <sys/queue.h>
20c5cfe765SSerhii Iliushyk 
212407c755SSerhii Iliushyk #include "rte_spinlock.h"
22db80608fSSerhii Iliushyk #include "ntlog.h"
23c93ef6edSSerhii Iliushyk #include "ntdrv_4ga.h"
24c93ef6edSSerhii Iliushyk #include "ntos_drv.h"
25c93ef6edSSerhii Iliushyk #include "ntos_system.h"
2605aa6305SSerhii Iliushyk #include "nthw_fpga_instances.h"
27c1c13953SSerhii Iliushyk #include "ntnic_vfio.h"
28d3dc3627SSerhii Iliushyk #include "ntnic_mod_reg.h"
296e6fd311SSerhii Iliushyk #include "nt_util.h"
30c0d44442SDanylo Vodopianov #include "profile_inline/flm_age_queue.h"
31e7e49ce6SDanylo Vodopianov #include "profile_inline/flm_evt_queue.h"
32e7e49ce6SDanylo Vodopianov #include "rte_pmd_ntnic.h"
33db80608fSSerhii Iliushyk 
3496c8249bSDanylo Vodopianov const rte_thread_attr_t thread_attr = { .priority = RTE_THREAD_PRIORITY_NORMAL };
35a1ba8c47SDanylo Vodopianov #define THREAD_CREATE(a, b, c) rte_thread_create(a, &thread_attr, b, c)
3696c8249bSDanylo Vodopianov #define THREAD_CTRL_CREATE(a, b, c, d) rte_thread_create_internal_control(a, b, c, d)
3796c8249bSDanylo Vodopianov #define THREAD_JOIN(a) rte_thread_join(a, NULL)
3896c8249bSDanylo Vodopianov #define THREAD_FUNC static uint32_t
3996c8249bSDanylo Vodopianov #define THREAD_RETURN (0)
409147e9f9SSerhii Iliushyk #define HW_MAX_PKT_LEN (10000)
419147e9f9SSerhii Iliushyk #define MAX_MTU (HW_MAX_PKT_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
42*6019656dSOleksandr Kolomeiets #define MIN_MTU_INLINE 512
439147e9f9SSerhii Iliushyk 
44c1c13953SSerhii Iliushyk #define EXCEPTION_PATH_HID 0
45c1c13953SSerhii Iliushyk 
46fe91ade9SDanylo Vodopianov #define MAX_TOTAL_QUEUES       128
47fe91ade9SDanylo Vodopianov 
486b0047faSDanylo Vodopianov #define SG_NB_HW_RX_DESCRIPTORS 1024
496b0047faSDanylo Vodopianov #define SG_NB_HW_TX_DESCRIPTORS 1024
506b0047faSDanylo Vodopianov #define SG_HW_RX_PKT_BUFFER_SIZE (1024 << 1)
516b0047faSDanylo Vodopianov #define SG_HW_TX_PKT_BUFFER_SIZE (1024 << 1)
526b0047faSDanylo Vodopianov 
53da25ae3cSDanylo Vodopianov #define NUM_VQ_SEGS(_data_size_)                                                                  \
54da25ae3cSDanylo Vodopianov 	({                                                                                        \
55da25ae3cSDanylo Vodopianov 		size_t _size = (_data_size_);                                                     \
56da25ae3cSDanylo Vodopianov 		size_t _segment_count = ((_size + SG_HDR_SIZE) > SG_HW_TX_PKT_BUFFER_SIZE)        \
57da25ae3cSDanylo Vodopianov 			? (((_size + SG_HDR_SIZE) + SG_HW_TX_PKT_BUFFER_SIZE - 1) /               \
58da25ae3cSDanylo Vodopianov 			   SG_HW_TX_PKT_BUFFER_SIZE)                                              \
59da25ae3cSDanylo Vodopianov 			: 1;                                                                      \
60da25ae3cSDanylo Vodopianov 		_segment_count;                                                                   \
61da25ae3cSDanylo Vodopianov 	})
62da25ae3cSDanylo Vodopianov 
63da25ae3cSDanylo Vodopianov #define VIRTQ_DESCR_IDX(_tx_pkt_idx_)                                                             \
64da25ae3cSDanylo Vodopianov 	(((_tx_pkt_idx_) + first_vq_descr_idx) % SG_NB_HW_TX_DESCRIPTORS)
65da25ae3cSDanylo Vodopianov 
66da25ae3cSDanylo Vodopianov #define VIRTQ_DESCR_IDX_NEXT(_vq_descr_idx_) (((_vq_descr_idx_) + 1) % SG_NB_HW_TX_DESCRIPTORS)
67da25ae3cSDanylo Vodopianov 
685284180aSDanylo Vodopianov #define ONE_G_SIZE  0x40000000
696b0047faSDanylo Vodopianov #define ONE_G_MASK  (ONE_G_SIZE - 1)
705284180aSDanylo Vodopianov 
71da25ae3cSDanylo Vodopianov #define MAX_RX_PACKETS   128
72da25ae3cSDanylo Vodopianov #define MAX_TX_PACKETS   128
73da25ae3cSDanylo Vodopianov 
74*6019656dSOleksandr Kolomeiets #define MTUINITVAL 1500
75*6019656dSOleksandr Kolomeiets 
76effa0469SDanylo Vodopianov uint64_t rte_tsc_freq;
77effa0469SDanylo Vodopianov 
78a1ba8c47SDanylo Vodopianov static void (*previous_handler)(int sig);
79a1ba8c47SDanylo Vodopianov static rte_thread_t shutdown_tid;
80a1ba8c47SDanylo Vodopianov 
81da25ae3cSDanylo Vodopianov int kill_pmd;
82da25ae3cSDanylo Vodopianov 
83fe91ade9SDanylo Vodopianov #define ETH_DEV_NTNIC_HELP_ARG "help"
84fe91ade9SDanylo Vodopianov #define ETH_DEV_NTHW_RXQUEUES_ARG "rxqs"
85fe91ade9SDanylo Vodopianov #define ETH_DEV_NTHW_TXQUEUES_ARG "txqs"
86fe91ade9SDanylo Vodopianov 
87fe91ade9SDanylo Vodopianov static const char *const valid_arguments[] = {
88fe91ade9SDanylo Vodopianov 	ETH_DEV_NTNIC_HELP_ARG,
89fe91ade9SDanylo Vodopianov 	ETH_DEV_NTHW_RXQUEUES_ARG,
90fe91ade9SDanylo Vodopianov 	ETH_DEV_NTHW_TXQUEUES_ARG,
91fe91ade9SDanylo Vodopianov 	NULL,
92fe91ade9SDanylo Vodopianov };
93fe91ade9SDanylo Vodopianov 
94fe91ade9SDanylo Vodopianov 
95c5cfe765SSerhii Iliushyk static const struct rte_pci_id nthw_pci_id_map[] = {
96894509edSSerhii Iliushyk 	{ RTE_PCI_DEVICE(NT_HW_PCI_VENDOR_ID, NT_HW_PCI_DEVICE_ID_NT200A02) },
97c5cfe765SSerhii Iliushyk 	{
98c5cfe765SSerhii Iliushyk 		.vendor_id = 0,
99c5cfe765SSerhii Iliushyk 	},	/* sentinel */
100c5cfe765SSerhii Iliushyk };
101c5cfe765SSerhii Iliushyk 
102b0cd36e9SDanylo Vodopianov static const struct sg_ops_s *sg_ops;
103b0cd36e9SDanylo Vodopianov 
104effa0469SDanylo Vodopianov rte_spinlock_t hwlock = RTE_SPINLOCK_INITIALIZER;
105c93ef6edSSerhii Iliushyk 
106c93ef6edSSerhii Iliushyk /*
107c93ef6edSSerhii Iliushyk  * Store and get adapter info
108c93ef6edSSerhii Iliushyk  */
109c93ef6edSSerhii Iliushyk 
110c93ef6edSSerhii Iliushyk static struct drv_s *_g_p_drv[NUM_ADAPTER_MAX] = { NULL };
111c93ef6edSSerhii Iliushyk 
112c93ef6edSSerhii Iliushyk static void
113c93ef6edSSerhii Iliushyk store_pdrv(struct drv_s *p_drv)
114c93ef6edSSerhii Iliushyk {
1153de5fe79SDanylo Vodopianov 	if (p_drv->adapter_no >= NUM_ADAPTER_MAX) {
116c93ef6edSSerhii Iliushyk 		NT_LOG(ERR, NTNIC,
1173489b87bSDanylo Vodopianov 			"Internal error adapter number %u out of range. Max number of adapters: %u",
118c93ef6edSSerhii Iliushyk 			p_drv->adapter_no, NUM_ADAPTER_MAX);
119c93ef6edSSerhii Iliushyk 		return;
120c93ef6edSSerhii Iliushyk 	}
121c93ef6edSSerhii Iliushyk 
122c93ef6edSSerhii Iliushyk 	if (_g_p_drv[p_drv->adapter_no] != 0) {
123c93ef6edSSerhii Iliushyk 		NT_LOG(WRN, NTNIC,
124c93ef6edSSerhii Iliushyk 			"Overwriting adapter structure for PCI  " PCIIDENT_PRINT_STR
1253489b87bSDanylo Vodopianov 			" with adapter structure for PCI  " PCIIDENT_PRINT_STR,
126c93ef6edSSerhii Iliushyk 			PCIIDENT_TO_DOMAIN(_g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
127c93ef6edSSerhii Iliushyk 			PCIIDENT_TO_BUSNR(_g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
128c93ef6edSSerhii Iliushyk 			PCIIDENT_TO_DEVNR(_g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
129c93ef6edSSerhii Iliushyk 			PCIIDENT_TO_FUNCNR(_g_p_drv[p_drv->adapter_no]->ntdrv.pciident),
130c93ef6edSSerhii Iliushyk 			PCIIDENT_TO_DOMAIN(p_drv->ntdrv.pciident),
131c93ef6edSSerhii Iliushyk 			PCIIDENT_TO_BUSNR(p_drv->ntdrv.pciident),
132c93ef6edSSerhii Iliushyk 			PCIIDENT_TO_DEVNR(p_drv->ntdrv.pciident),
133c93ef6edSSerhii Iliushyk 			PCIIDENT_TO_FUNCNR(p_drv->ntdrv.pciident));
134c93ef6edSSerhii Iliushyk 	}
135c93ef6edSSerhii Iliushyk 
136c93ef6edSSerhii Iliushyk 	rte_spinlock_lock(&hwlock);
137c93ef6edSSerhii Iliushyk 	_g_p_drv[p_drv->adapter_no] = p_drv;
138c93ef6edSSerhii Iliushyk 	rte_spinlock_unlock(&hwlock);
139c93ef6edSSerhii Iliushyk }
140c93ef6edSSerhii Iliushyk 
14196c8249bSDanylo Vodopianov static void clear_pdrv(struct drv_s *p_drv)
14296c8249bSDanylo Vodopianov {
14396c8249bSDanylo Vodopianov 	if (p_drv->adapter_no > NUM_ADAPTER_MAX)
14496c8249bSDanylo Vodopianov 		return;
14596c8249bSDanylo Vodopianov 
14696c8249bSDanylo Vodopianov 	rte_spinlock_lock(&hwlock);
14796c8249bSDanylo Vodopianov 	_g_p_drv[p_drv->adapter_no] = NULL;
14896c8249bSDanylo Vodopianov 	rte_spinlock_unlock(&hwlock);
14996c8249bSDanylo Vodopianov }
15096c8249bSDanylo Vodopianov 
151c93ef6edSSerhii Iliushyk static struct drv_s *
152c93ef6edSSerhii Iliushyk get_pdrv_from_pci(struct rte_pci_addr addr)
153c93ef6edSSerhii Iliushyk {
154c93ef6edSSerhii Iliushyk 	int i;
155c93ef6edSSerhii Iliushyk 	struct drv_s *p_drv = NULL;
156c93ef6edSSerhii Iliushyk 	rte_spinlock_lock(&hwlock);
157c93ef6edSSerhii Iliushyk 
158c93ef6edSSerhii Iliushyk 	for (i = 0; i < NUM_ADAPTER_MAX; i++) {
159c93ef6edSSerhii Iliushyk 		if (_g_p_drv[i]) {
160c93ef6edSSerhii Iliushyk 			if (PCIIDENT_TO_DOMAIN(_g_p_drv[i]->ntdrv.pciident) == addr.domain &&
161c93ef6edSSerhii Iliushyk 				PCIIDENT_TO_BUSNR(_g_p_drv[i]->ntdrv.pciident) == addr.bus) {
162c93ef6edSSerhii Iliushyk 				p_drv = _g_p_drv[i];
163c93ef6edSSerhii Iliushyk 				break;
164c93ef6edSSerhii Iliushyk 			}
165c93ef6edSSerhii Iliushyk 		}
166c93ef6edSSerhii Iliushyk 	}
167c93ef6edSSerhii Iliushyk 
168c93ef6edSSerhii Iliushyk 	rte_spinlock_unlock(&hwlock);
169c93ef6edSSerhii Iliushyk 	return p_drv;
170c93ef6edSSerhii Iliushyk }
171c93ef6edSSerhii Iliushyk 
/*
 * Gather per-queue and aggregate Rx/Tx statistics for one port into a
 * struct rte_eth_stats.
 *
 * Triggers a poll of the hardware counters via the filter module (results
 * land in internals->rxq_scg[] / txq_scg[]), then copies them into the
 * first RTE_ETHDEV_QUEUE_STAT_CNTRS queue slots and sums the totals.
 *
 * Returns 0 on success, -1 when the filter module or statistic objects are
 * unavailable or the interface index is out of range.
 */
static int dpdk_stats_collect(struct pmd_internals *internals, struct rte_eth_stats *stats)
{
	const struct ntnic_filter_ops *ntnic_filter_ops = get_ntnic_filter_ops();

	if (ntnic_filter_ops == NULL) {
		NT_LOG_DBGX(ERR, NTNIC, "ntnic_filter_ops uninitialized");
		return -1;
	}

	unsigned int i;
	struct drv_s *p_drv = internals->p_drv;
	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
	const int if_index = internals->n_intf_no;
	uint64_t rx_total = 0;
	uint64_t rx_total_b = 0;
	uint64_t tx_total = 0;
	uint64_t tx_total_b = 0;
	uint64_t tx_err_total = 0;

	/*
	 * NOTE(review): the bound uses '>' NUM_ADAPTER_PORTS_MAX, i.e. an index
	 * equal to the max is accepted — confirm whether NUM_ADAPTER_PORTS_MAX
	 * is a count (then '>=' would be correct) or a maximum valid index.
	 */
	if (!p_nthw_stat || !p_nt4ga_stat || !stats || if_index < 0 ||
		if_index > NUM_ADAPTER_PORTS_MAX) {
		NT_LOG_DBGX(WRN, NTNIC, "error exit");
		return -1;
	}

	/*
	 * Pull the latest port statistic numbers (Rx/Tx pkts and bytes)
	 * Return values are in the "internals->rxq_scg[]" and "internals->txq_scg[]" arrays
	 */
	ntnic_filter_ops->poll_statistics(internals);

	memset(stats, 0, sizeof(*stats));

	/* Per-queue Rx counters, clipped to what rte_eth_stats can hold */
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_rx_queues; i++) {
		stats->q_ipackets[i] = internals->rxq_scg[i].rx_pkts;
		stats->q_ibytes[i] = internals->rxq_scg[i].rx_bytes;
		rx_total += stats->q_ipackets[i];
		rx_total_b += stats->q_ibytes[i];
	}

	/* Per-queue Tx counters, including per-queue error packets */
	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < internals->nb_tx_queues; i++) {
		stats->q_opackets[i] = internals->txq_scg[i].tx_pkts;
		stats->q_obytes[i] = internals->txq_scg[i].tx_bytes;
		stats->q_errors[i] = internals->txq_scg[i].err_pkts;
		tx_total += stats->q_opackets[i];
		tx_total_b += stats->q_obytes[i];
		tx_err_total += stats->q_errors[i];
	}

	/* Aggregate port-level totals */
	stats->imissed = internals->rx_missed;
	stats->ipackets = rx_total;
	stats->ibytes = rx_total_b;
	stats->opackets = tx_total;
	stats->obytes = tx_total_b;
	stats->oerrors = tx_err_total;

	return 0;
}
232effa0469SDanylo Vodopianov 
233effa0469SDanylo Vodopianov static int dpdk_stats_reset(struct pmd_internals *internals, struct ntdrv_4ga_s *p_nt_drv,
234effa0469SDanylo Vodopianov 	int n_intf_no)
235effa0469SDanylo Vodopianov {
236effa0469SDanylo Vodopianov 	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
237effa0469SDanylo Vodopianov 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
238effa0469SDanylo Vodopianov 	unsigned int i;
239effa0469SDanylo Vodopianov 
240effa0469SDanylo Vodopianov 	if (!p_nthw_stat || !p_nt4ga_stat || n_intf_no < 0 || n_intf_no > NUM_ADAPTER_PORTS_MAX)
241effa0469SDanylo Vodopianov 		return -1;
242effa0469SDanylo Vodopianov 
2432407c755SSerhii Iliushyk 	rte_spinlock_lock(&p_nt_drv->stat_lck);
244effa0469SDanylo Vodopianov 
245effa0469SDanylo Vodopianov 	/* Rx */
246effa0469SDanylo Vodopianov 	for (i = 0; i < internals->nb_rx_queues; i++) {
247effa0469SDanylo Vodopianov 		internals->rxq_scg[i].rx_pkts = 0;
248effa0469SDanylo Vodopianov 		internals->rxq_scg[i].rx_bytes = 0;
249effa0469SDanylo Vodopianov 		internals->rxq_scg[i].err_pkts = 0;
250effa0469SDanylo Vodopianov 	}
251effa0469SDanylo Vodopianov 
252effa0469SDanylo Vodopianov 	internals->rx_missed = 0;
253effa0469SDanylo Vodopianov 
254effa0469SDanylo Vodopianov 	/* Tx */
255effa0469SDanylo Vodopianov 	for (i = 0; i < internals->nb_tx_queues; i++) {
256effa0469SDanylo Vodopianov 		internals->txq_scg[i].tx_pkts = 0;
257effa0469SDanylo Vodopianov 		internals->txq_scg[i].tx_bytes = 0;
258effa0469SDanylo Vodopianov 		internals->txq_scg[i].err_pkts = 0;
259effa0469SDanylo Vodopianov 	}
260effa0469SDanylo Vodopianov 
261effa0469SDanylo Vodopianov 	p_nt4ga_stat->n_totals_reset_timestamp = time(NULL);
262effa0469SDanylo Vodopianov 
2632407c755SSerhii Iliushyk 	rte_spinlock_unlock(&p_nt_drv->stat_lck);
264effa0469SDanylo Vodopianov 
265effa0469SDanylo Vodopianov 	return 0;
266effa0469SDanylo Vodopianov }
267effa0469SDanylo Vodopianov 
268c93ef6edSSerhii Iliushyk static int
2699147e9f9SSerhii Iliushyk eth_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete __rte_unused)
270c93ef6edSSerhii Iliushyk {
2719147e9f9SSerhii Iliushyk 	const struct port_ops *port_ops = get_port_ops();
2729147e9f9SSerhii Iliushyk 
2739147e9f9SSerhii Iliushyk 	if (port_ops == NULL) {
2743489b87bSDanylo Vodopianov 		NT_LOG(ERR, NTNIC, "Link management module uninitialized");
2759147e9f9SSerhii Iliushyk 		return -1;
2769147e9f9SSerhii Iliushyk 	}
2779147e9f9SSerhii Iliushyk 
278e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *internals = eth_dev->data->dev_private;
279c93ef6edSSerhii Iliushyk 
2809147e9f9SSerhii Iliushyk 	const int n_intf_no = internals->n_intf_no;
2819147e9f9SSerhii Iliushyk 	struct adapter_info_s *p_adapter_info = &internals->p_drv->ntdrv.adapter_info;
2829147e9f9SSerhii Iliushyk 
2839147e9f9SSerhii Iliushyk 	if (eth_dev->data->dev_started) {
2849147e9f9SSerhii Iliushyk 		const bool port_link_status = port_ops->get_link_status(p_adapter_info, n_intf_no);
2859147e9f9SSerhii Iliushyk 		eth_dev->data->dev_link.link_status =
2869147e9f9SSerhii Iliushyk 			port_link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
2879147e9f9SSerhii Iliushyk 
2889147e9f9SSerhii Iliushyk 		nt_link_speed_t port_link_speed =
2899147e9f9SSerhii Iliushyk 			port_ops->get_link_speed(p_adapter_info, n_intf_no);
2909147e9f9SSerhii Iliushyk 		eth_dev->data->dev_link.link_speed =
2919147e9f9SSerhii Iliushyk 			nt_link_speed_to_eth_speed_num(port_link_speed);
2929147e9f9SSerhii Iliushyk 
2939147e9f9SSerhii Iliushyk 		nt_link_duplex_t nt_link_duplex =
2949147e9f9SSerhii Iliushyk 			port_ops->get_link_duplex(p_adapter_info, n_intf_no);
2959147e9f9SSerhii Iliushyk 		eth_dev->data->dev_link.link_duplex = nt_link_duplex_to_eth_duplex(nt_link_duplex);
2969147e9f9SSerhii Iliushyk 
2979147e9f9SSerhii Iliushyk 	} else {
2989147e9f9SSerhii Iliushyk 		eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
2999147e9f9SSerhii Iliushyk 		eth_dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
3009147e9f9SSerhii Iliushyk 		eth_dev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
3019147e9f9SSerhii Iliushyk 	}
3029147e9f9SSerhii Iliushyk 
3039147e9f9SSerhii Iliushyk 	return 0;
3049147e9f9SSerhii Iliushyk }
3059147e9f9SSerhii Iliushyk 
306effa0469SDanylo Vodopianov static int eth_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
307effa0469SDanylo Vodopianov {
308e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *internals = eth_dev->data->dev_private;
309effa0469SDanylo Vodopianov 	dpdk_stats_collect(internals, stats);
310effa0469SDanylo Vodopianov 	return 0;
311effa0469SDanylo Vodopianov }
312effa0469SDanylo Vodopianov 
313effa0469SDanylo Vodopianov static int eth_stats_reset(struct rte_eth_dev *eth_dev)
314effa0469SDanylo Vodopianov {
315e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *internals = eth_dev->data->dev_private;
316effa0469SDanylo Vodopianov 	struct drv_s *p_drv = internals->p_drv;
317effa0469SDanylo Vodopianov 	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
318effa0469SDanylo Vodopianov 	const int if_index = internals->n_intf_no;
319effa0469SDanylo Vodopianov 	dpdk_stats_reset(internals, p_nt_drv, if_index);
320effa0469SDanylo Vodopianov 	return 0;
321effa0469SDanylo Vodopianov }
322effa0469SDanylo Vodopianov 
/*
 * DPDK dev_infos_get callback: report device capabilities and limits.
 *
 * Always fills in MAC-address/MTU/packet-length limits; on inline-profile
 * FPGAs additionally advertises RSS capabilities, and when a driver context
 * is attached reports queue counts and link-speed capabilities.
 *
 * Returns 0 on success, -1 when the link management module is unavailable.
 */
static int
eth_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *dev_info)
{
	const struct port_ops *port_ops = get_port_ops();

	if (port_ops == NULL) {
		NT_LOG(ERR, NTNIC, "Link management module uninitialized");
		return -1;
	}

	struct pmd_internals *internals = eth_dev->data->dev_private;

	const int n_intf_no = internals->n_intf_no;
	struct adapter_info_s *p_adapter_info = &internals->p_drv->ntdrv.adapter_info;

	dev_info->driver_name = internals->name;
	dev_info->max_mac_addrs = NUM_MAC_ADDRS_PER_PORT;
	dev_info->max_rx_pktlen = HW_MAX_PKT_LEN;
	dev_info->max_mtu = MAX_MTU;

	/* Inline profile supports RSS and enforces a larger minimum MTU */
	if (p_adapter_info->fpga_info.profile == FPGA_INFO_PROFILE_INLINE) {
		dev_info->min_mtu = MIN_MTU_INLINE;
		dev_info->flow_type_rss_offloads = NT_ETH_RSS_OFFLOAD_MASK;
		dev_info->hash_key_size = MAX_RSS_KEY_LEN;

		dev_info->rss_algo_capa = RTE_ETH_HASH_ALGO_CAPA_MASK(DEFAULT) |
			RTE_ETH_HASH_ALGO_CAPA_MASK(TOEPLITZ);
	}

	/* Queue counts and speed capabilities require an attached driver context */
	if (internals->p_drv) {
		dev_info->max_rx_queues = internals->nb_rx_queues;
		dev_info->max_tx_queues = internals->nb_tx_queues;

		dev_info->min_rx_bufsize = 64;

		const uint32_t nt_port_speed_capa =
			port_ops->get_link_speed_capabilities(p_adapter_info, n_intf_no);
		dev_info->speed_capa = nt_link_speed_capa_to_eth_speed_capa(nt_port_speed_capa);
	}

	return 0;
}
3659147e9f9SSerhii Iliushyk 
/*
 * Copy one received packet from virtqueue segments into an mbuf chain.
 *
 * hw_recv points at the first virtqueue segment of the packet; data_len is
 * the capture length of that first segment (including the SG_HDR_SIZE
 * header, which is stripped). Additional mbufs are allocated from mb_pool
 * when the packet does not fit in 'mbuf'.
 *
 * Returns the number of virtqueue segments consumed, or -1 on mbuf
 * allocation failure or when more than max_segs segments would be needed.
 */
static __rte_always_inline int copy_virtqueue_to_mbuf(struct rte_mbuf *mbuf,
	struct rte_mempool *mb_pool,
	struct nthw_received_packets *hw_recv,
	int max_segs,
	uint16_t data_len)
{
	int src_pkt = 0;
	/*
	 * 1. virtqueue packets may be segmented
	 * 2. the mbuf size may be too small and may need to be segmented
	 */
	char *data = (char *)hw_recv->addr + SG_HDR_SIZE;
	char *dst = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;

	/* set packet length (payload only, header stripped) */
	mbuf->pkt_len = data_len - SG_HDR_SIZE;

	int remain = mbuf->pkt_len;
	/* First cpy_size is without header */
	int cpy_size = (data_len > SG_HW_RX_PKT_BUFFER_SIZE)
		? SG_HW_RX_PKT_BUFFER_SIZE - SG_HDR_SIZE
		: remain;

	struct rte_mbuf *m = mbuf;	/* if mbuf segmentation is needed */

	/* One iteration per virtqueue segment consumed */
	while (++src_pkt <= max_segs) {
		/* keep track of space in dst */
		int cpto_size = rte_pktmbuf_tailroom(m);

		if (cpy_size > cpto_size) {
			/* Current mbuf cannot hold the whole segment: fill it ... */
			int new_cpy_size = cpto_size;

			rte_memcpy((void *)dst, (void *)data, new_cpy_size);
			m->data_len += new_cpy_size;
			remain -= new_cpy_size;
			cpy_size -= new_cpy_size;

			data += new_cpy_size;

			/*
			 * loop if remaining data from this virtqueue seg
			 * cannot fit in one extra mbuf
			 */
			do {
				m->next = rte_pktmbuf_alloc(mb_pool);

				if (unlikely(!m->next))
					return -1;

				m = m->next;

				/* Headroom is not needed in chained mbufs */
				rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
				dst = (char *)m->buf_addr;
				m->data_len = 0;
				m->pkt_len = 0;

				cpto_size = rte_pktmbuf_tailroom(m);

				int actual_cpy_size =
					(cpy_size > cpto_size) ? cpto_size : cpy_size;

				rte_memcpy((void *)dst, (void *)data, actual_cpy_size);
				m->pkt_len += actual_cpy_size;
				m->data_len += actual_cpy_size;

				remain -= actual_cpy_size;
				cpy_size -= actual_cpy_size;

				data += actual_cpy_size;

				/* head mbuf tracks the total segment count */
				mbuf->nb_segs++;

			} while (cpy_size && remain);

		} else {
			/* all data from this virtqueue segment can fit in current mbuf */
			rte_memcpy((void *)dst, (void *)data, cpy_size);
			m->data_len += cpy_size;

			if (mbuf->nb_segs > 1)
				m->pkt_len += cpy_size;

			remain -= cpy_size;
		}

		/* packet complete - all data from current virtqueue packet has been copied */
		if (remain == 0)
			break;

		/* increment dst to data end */
		dst = rte_pktmbuf_mtod_offset(m, char *, m->data_len);
		/* prepare for next virtqueue segment */
		data = (char *)hw_recv[src_pkt].addr;	/* following packets are full data */

		cpy_size = (remain > SG_HW_RX_PKT_BUFFER_SIZE) ? SG_HW_RX_PKT_BUFFER_SIZE : remain;
	};

	/* Ran out of segments before the packet was complete */
	if (src_pkt > max_segs) {
		NT_LOG(ERR, NTNIC,
			"Did not receive correct number of segment for a whole packet");
		return -1;
	}

	return src_pkt;
}
472da25ae3cSDanylo Vodopianov 
/*
 * Burst RX handler (scatter-gather path).
 *
 * Fetches up to nb_pkts whole packets (as virtqueue segments) from the
 * queue's virtqueue via sg_ops, copies each into freshly allocated mbufs
 * (chaining when a packet exceeds one mbuf), and releases the consumed
 * virtqueue segments. Returns the number of mbufs delivered in bufs[].
 *
 * On mbuf exhaustion or a malformed header the burst is cut short; already
 * completed packets are still returned and all fetched segments released.
 */
static uint16_t eth_dev_rx_scg(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned int i;
	struct rte_mbuf *mbuf;
	struct ntnic_rx_queue *rx_q = queue;
	uint16_t num_rx = 0;

	struct nthw_received_packets hw_recv[MAX_RX_PACKETS];

	/* PMD is being shut down - stop delivering packets */
	if (kill_pmd)
		return 0;

	if (unlikely(nb_pkts == 0))
		return 0;

	if (nb_pkts > MAX_RX_PACKETS)
		nb_pkts = MAX_RX_PACKETS;

	uint16_t whole_pkts = 0;
	uint16_t hw_recv_pkt_segs = 0;

	if (sg_ops != NULL) {
		hw_recv_pkt_segs =
			sg_ops->nthw_get_rx_packets(rx_q->vq, nb_pkts, hw_recv, &whole_pkts);

		if (!hw_recv_pkt_segs)
			return 0;
	}

	/* Only complete packets are handed to the application */
	nb_pkts = whole_pkts;

	int src_pkt = 0;/* from 0 to hw_recv_pkt_segs */

	for (i = 0; i < nb_pkts; i++) {
		bufs[i] = rte_pktmbuf_alloc(rx_q->mb_pool);

		if (!bufs[i]) {
			NT_LOG(ERR, NTNIC, "ERROR - no more buffers mbuf in mempool");
			goto err_exit;
		}

		mbuf = bufs[i];

		/* The segment starts with the capture header */
		struct _pkt_hdr_rx *phdr = (struct _pkt_hdr_rx *)hw_recv[src_pkt].addr;

		if (phdr->cap_len < SG_HDR_SIZE) {
			NT_LOG(ERR, NTNIC,
				"Pkt len of zero received. No header!! - dropping packets");
			rte_pktmbuf_free(mbuf);
			goto err_exit;
		}

		{
			/* Fast path: single segment that fits in one mbuf */
			if (phdr->cap_len <= SG_HW_RX_PKT_BUFFER_SIZE &&
				(phdr->cap_len - SG_HDR_SIZE) <= rte_pktmbuf_tailroom(mbuf)) {
				mbuf->data_len = phdr->cap_len - SG_HDR_SIZE;
				rte_memcpy(rte_pktmbuf_mtod(mbuf, char *),
					(char *)hw_recv[src_pkt].addr + SG_HDR_SIZE,
					mbuf->data_len);

				mbuf->pkt_len = mbuf->data_len;
				src_pkt++;

			} else {
				/* Multi-segment packet: build an mbuf chain */
				int cpy_segs = copy_virtqueue_to_mbuf(mbuf, rx_q->mb_pool,
						&hw_recv[src_pkt],
						hw_recv_pkt_segs - src_pkt,
						phdr->cap_len);

				if (cpy_segs < 0) {
					/* Error */
					rte_pktmbuf_free(mbuf);
					goto err_exit;
				}

				src_pkt += cpy_segs;
			}

			num_rx++;

			mbuf->ol_flags &= ~(RTE_MBUF_F_RX_FDIR_ID | RTE_MBUF_F_RX_FDIR);
			mbuf->port = (uint16_t)-1;
		}
	}

err_exit:

	/* Return all fetched segments to the virtqueue */
	if (sg_ops != NULL)
		sg_ops->nthw_release_rx_packets(rx_q->vq, hw_recv_pkt_segs);

	return num_rx;
}
565da25ae3cSDanylo Vodopianov 
/*
 * Copy one (possibly multi-segment) mbuf into a chain of TX virtqueue
 * buffers, starting at descriptor @vq_descr_idx.
 *
 * The first descriptor reserves SG_HDR_SIZE bytes in front of the payload
 * for the SG header; additional descriptors (chained with VIRTQ_DESC_F_NEXT)
 * carry payload only.
 *
 * @param cvq_desc     combined virtqueue descriptor table being filled in
 * @param vq_descr_idx index of the first descriptor to use
 * @param vq_bufs      per-descriptor buffer memory (virt addresses used here)
 * @param max_segs     maximum number of virtqueue descriptors this packet
 *                     may consume
 * @param mbuf         source packet (chain of one or more mbuf segments)
 *
 * @return number of virtqueue descriptors consumed (>= 1) on success,
 *         -1 if the packet needed more than @max_segs descriptors.
 */
static int copy_mbuf_to_virtqueue(struct nthw_cvirtq_desc *cvq_desc,
	uint16_t vq_descr_idx,
	struct nthw_memory_descriptor *vq_bufs,
	int max_segs,
	struct rte_mbuf *mbuf)
{
	/*
	 * 1. mbuf packet may be segmented
	 * 2. the virtqueue buffer size may be too small and may need to be segmented
	 */

	char *data = rte_pktmbuf_mtod(mbuf, char *);
	char *dst = (char *)vq_bufs[vq_descr_idx].virt_addr + SG_HDR_SIZE;

	/* remain: bytes left of the whole packet; cpy_size: bytes left of the
	 * current mbuf segment
	 */
	int remain = mbuf->pkt_len;
	int cpy_size = mbuf->data_len;

	struct rte_mbuf *m = mbuf;
	int cpto_size = SG_HW_TX_PKT_BUFFER_SIZE - SG_HDR_SIZE;

	/* first descriptor starts out holding only the SG header */
	cvq_desc->b[vq_descr_idx].len = SG_HDR_SIZE;

	int cur_seg_num = 0;	/* start from 0 */

	while (m) {
		/* Can all data in current src segment be in current dest segment */
		if (cpy_size > cpto_size) {
			/* No: fill the current descriptor buffer completely ... */
			int new_cpy_size = cpto_size;

			rte_memcpy((void *)dst, (void *)data, new_cpy_size);

			cvq_desc->b[vq_descr_idx].len += new_cpy_size;

			remain -= new_cpy_size;
			cpy_size -= new_cpy_size;

			data += new_cpy_size;

			/*
			 * Loop if remaining data from this virtqueue seg cannot fit in one extra
			 * mbuf
			 */
			do {
				/* ... then chain on a fresh descriptor and continue there */
				vq_add_flags(cvq_desc, vq_descr_idx, VIRTQ_DESC_F_NEXT);

				int next_vq_descr_idx = VIRTQ_DESCR_IDX_NEXT(vq_descr_idx);

				vq_set_next(cvq_desc, vq_descr_idx, next_vq_descr_idx);

				vq_descr_idx = next_vq_descr_idx;

				vq_set_flags(cvq_desc, vq_descr_idx, 0);
				vq_set_next(cvq_desc, vq_descr_idx, 0);

				/* descriptor budget exhausted; the final check below
				 * turns this into an error return
				 */
				if (++cur_seg_num > max_segs)
					break;

				/* chained descriptors carry payload only - no SG header */
				dst = (char *)vq_bufs[vq_descr_idx].virt_addr;
				cpto_size = SG_HW_TX_PKT_BUFFER_SIZE;

				int actual_cpy_size =
					(cpy_size > cpto_size) ? cpto_size : cpy_size;
				rte_memcpy((void *)dst, (void *)data, actual_cpy_size);

				cvq_desc->b[vq_descr_idx].len = actual_cpy_size;

				remain -= actual_cpy_size;
				cpy_size -= actual_cpy_size;
				cpto_size -= actual_cpy_size;

				data += actual_cpy_size;

			} while (cpy_size && remain);

		} else {
			/* All data from this segment can fit in current virtqueue buffer */
			rte_memcpy((void *)dst, (void *)data, cpy_size);

			cvq_desc->b[vq_descr_idx].len += cpy_size;

			remain -= cpy_size;
			cpto_size -= cpy_size;
		}

		/* Packet complete - all segments from current mbuf has been copied */
		if (remain == 0)
			break;

		/* increment dst to data end */
		dst = (char *)vq_bufs[vq_descr_idx].virt_addr + cvq_desc->b[vq_descr_idx].len;

		m = m->next;

		if (!m) {
			/* pkt_len said more data remains but the mbuf chain ended */
			NT_LOG(ERR, NTNIC, "ERROR: invalid packet size");
			break;
		}

		/* Prepare for next mbuf segment */
		data = rte_pktmbuf_mtod(m, char *);
		cpy_size = m->data_len;
	};

	/* account for the descriptor currently in use (counter started at 0) */
	cur_seg_num++;

	if (cur_seg_num > max_segs) {
		NT_LOG(ERR, NTNIC,
			"Did not receive correct number of segment for a whole packet");
		return -1;
	}

	return cur_seg_num;
}
679da25ae3cSDanylo Vodopianov 
680da25ae3cSDanylo Vodopianov static uint16_t eth_dev_tx_scg(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
681da25ae3cSDanylo Vodopianov {
682da25ae3cSDanylo Vodopianov 	uint16_t pkt;
683da25ae3cSDanylo Vodopianov 	uint16_t first_vq_descr_idx = 0;
684da25ae3cSDanylo Vodopianov 
685da25ae3cSDanylo Vodopianov 	struct nthw_cvirtq_desc cvq_desc;
686da25ae3cSDanylo Vodopianov 
687da25ae3cSDanylo Vodopianov 	struct nthw_memory_descriptor *vq_bufs;
688da25ae3cSDanylo Vodopianov 
689da25ae3cSDanylo Vodopianov 	struct ntnic_tx_queue *tx_q = queue;
690da25ae3cSDanylo Vodopianov 
691da25ae3cSDanylo Vodopianov 	int nb_segs = 0, i;
692da25ae3cSDanylo Vodopianov 	int pkts_sent = 0;
693da25ae3cSDanylo Vodopianov 	uint16_t nb_segs_arr[MAX_TX_PACKETS];
694da25ae3cSDanylo Vodopianov 
695da25ae3cSDanylo Vodopianov 	if (kill_pmd)
696da25ae3cSDanylo Vodopianov 		return 0;
697da25ae3cSDanylo Vodopianov 
698da25ae3cSDanylo Vodopianov 	if (nb_pkts > MAX_TX_PACKETS)
699da25ae3cSDanylo Vodopianov 		nb_pkts = MAX_TX_PACKETS;
700da25ae3cSDanylo Vodopianov 
701da25ae3cSDanylo Vodopianov 	/*
702da25ae3cSDanylo Vodopianov 	 * count all segments needed to contain all packets in vq buffers
703da25ae3cSDanylo Vodopianov 	 */
704da25ae3cSDanylo Vodopianov 	for (i = 0; i < nb_pkts; i++) {
705da25ae3cSDanylo Vodopianov 		/* build the num segments array for segmentation control and release function */
706da25ae3cSDanylo Vodopianov 		int vq_segs = NUM_VQ_SEGS(bufs[i]->pkt_len);
707da25ae3cSDanylo Vodopianov 		nb_segs_arr[i] = vq_segs;
708da25ae3cSDanylo Vodopianov 		nb_segs += vq_segs;
709da25ae3cSDanylo Vodopianov 	}
710da25ae3cSDanylo Vodopianov 
711da25ae3cSDanylo Vodopianov 	if (!nb_segs)
712da25ae3cSDanylo Vodopianov 		goto exit_out;
713da25ae3cSDanylo Vodopianov 
714da25ae3cSDanylo Vodopianov 	if (sg_ops == NULL)
715da25ae3cSDanylo Vodopianov 		goto exit_out;
716da25ae3cSDanylo Vodopianov 
717da25ae3cSDanylo Vodopianov 	int got_nb_segs = sg_ops->nthw_get_tx_packets(tx_q->vq, nb_segs, &first_vq_descr_idx,
718da25ae3cSDanylo Vodopianov 			&cvq_desc /*&vq_descr,*/, &vq_bufs);
719da25ae3cSDanylo Vodopianov 
720da25ae3cSDanylo Vodopianov 	if (!got_nb_segs)
721da25ae3cSDanylo Vodopianov 		goto exit_out;
722da25ae3cSDanylo Vodopianov 
723da25ae3cSDanylo Vodopianov 	/*
724da25ae3cSDanylo Vodopianov 	 * we may get less vq buffers than we have asked for
725da25ae3cSDanylo Vodopianov 	 * calculate last whole packet that can fit into what
726da25ae3cSDanylo Vodopianov 	 * we have got
727da25ae3cSDanylo Vodopianov 	 */
728da25ae3cSDanylo Vodopianov 	while (got_nb_segs < nb_segs) {
729da25ae3cSDanylo Vodopianov 		if (!--nb_pkts)
730da25ae3cSDanylo Vodopianov 			goto exit_out;
731da25ae3cSDanylo Vodopianov 
732da25ae3cSDanylo Vodopianov 		nb_segs -= NUM_VQ_SEGS(bufs[nb_pkts]->pkt_len);
733da25ae3cSDanylo Vodopianov 
734da25ae3cSDanylo Vodopianov 		if (nb_segs <= 0)
735da25ae3cSDanylo Vodopianov 			goto exit_out;
736da25ae3cSDanylo Vodopianov 	}
737da25ae3cSDanylo Vodopianov 
738da25ae3cSDanylo Vodopianov 	/*
739da25ae3cSDanylo Vodopianov 	 * nb_pkts & nb_segs, got it all, ready to copy
740da25ae3cSDanylo Vodopianov 	 */
741da25ae3cSDanylo Vodopianov 	int seg_idx = 0;
742da25ae3cSDanylo Vodopianov 	int last_seg_idx = seg_idx;
743da25ae3cSDanylo Vodopianov 
744da25ae3cSDanylo Vodopianov 	for (pkt = 0; pkt < nb_pkts; ++pkt) {
745da25ae3cSDanylo Vodopianov 		uint16_t vq_descr_idx = VIRTQ_DESCR_IDX(seg_idx);
746da25ae3cSDanylo Vodopianov 
747da25ae3cSDanylo Vodopianov 		vq_set_flags(&cvq_desc, vq_descr_idx, 0);
748da25ae3cSDanylo Vodopianov 		vq_set_next(&cvq_desc, vq_descr_idx, 0);
749da25ae3cSDanylo Vodopianov 
750da25ae3cSDanylo Vodopianov 		if (bufs[pkt]->nb_segs == 1 && nb_segs_arr[pkt] == 1) {
751da25ae3cSDanylo Vodopianov 			rte_memcpy((void *)((char *)vq_bufs[vq_descr_idx].virt_addr + SG_HDR_SIZE),
752da25ae3cSDanylo Vodopianov 				rte_pktmbuf_mtod(bufs[pkt], void *), bufs[pkt]->pkt_len);
753da25ae3cSDanylo Vodopianov 
754da25ae3cSDanylo Vodopianov 			cvq_desc.b[vq_descr_idx].len = bufs[pkt]->pkt_len + SG_HDR_SIZE;
755da25ae3cSDanylo Vodopianov 
756da25ae3cSDanylo Vodopianov 			seg_idx++;
757da25ae3cSDanylo Vodopianov 
758da25ae3cSDanylo Vodopianov 		} else {
759da25ae3cSDanylo Vodopianov 			int cpy_segs = copy_mbuf_to_virtqueue(&cvq_desc, vq_descr_idx, vq_bufs,
760da25ae3cSDanylo Vodopianov 					nb_segs - last_seg_idx, bufs[pkt]);
761da25ae3cSDanylo Vodopianov 
762da25ae3cSDanylo Vodopianov 			if (cpy_segs < 0)
763da25ae3cSDanylo Vodopianov 				break;
764da25ae3cSDanylo Vodopianov 
765da25ae3cSDanylo Vodopianov 			seg_idx += cpy_segs;
766da25ae3cSDanylo Vodopianov 		}
767da25ae3cSDanylo Vodopianov 
768da25ae3cSDanylo Vodopianov 		last_seg_idx = seg_idx;
769da25ae3cSDanylo Vodopianov 		rte_pktmbuf_free(bufs[pkt]);
770da25ae3cSDanylo Vodopianov 		pkts_sent++;
771da25ae3cSDanylo Vodopianov 	}
772da25ae3cSDanylo Vodopianov 
773da25ae3cSDanylo Vodopianov exit_out:
774da25ae3cSDanylo Vodopianov 
775da25ae3cSDanylo Vodopianov 	if (sg_ops != NULL) {
776da25ae3cSDanylo Vodopianov 		if (pkts_sent)
777da25ae3cSDanylo Vodopianov 			sg_ops->nthw_release_tx_packets(tx_q->vq, pkts_sent, nb_segs_arr);
778da25ae3cSDanylo Vodopianov 	}
779da25ae3cSDanylo Vodopianov 
780da25ae3cSDanylo Vodopianov 	return pkts_sent;
781da25ae3cSDanylo Vodopianov }
782da25ae3cSDanylo Vodopianov 
7836b0047faSDanylo Vodopianov static int allocate_hw_virtio_queues(struct rte_eth_dev *eth_dev, int vf_num, struct hwq_s *hwq,
7846b0047faSDanylo Vodopianov 	int num_descr, int buf_size)
7856b0047faSDanylo Vodopianov {
7866b0047faSDanylo Vodopianov 	int i, res;
7876b0047faSDanylo Vodopianov 	uint32_t size;
7886b0047faSDanylo Vodopianov 	uint64_t iova_addr;
7896b0047faSDanylo Vodopianov 
7906b0047faSDanylo Vodopianov 	NT_LOG(DBG, NTNIC, "***** Configure IOMMU for HW queues on VF %i *****", vf_num);
7916b0047faSDanylo Vodopianov 
7926b0047faSDanylo Vodopianov 	/* Just allocate 1MB to hold all combined descr rings */
7936b0047faSDanylo Vodopianov 	uint64_t tot_alloc_size = 0x100000 + buf_size * num_descr;
7946b0047faSDanylo Vodopianov 
7956b0047faSDanylo Vodopianov 	void *virt =
7966b0047faSDanylo Vodopianov 		rte_malloc_socket("VirtQDescr", tot_alloc_size, nt_util_align_size(tot_alloc_size),
7976b0047faSDanylo Vodopianov 			eth_dev->data->numa_node);
7986b0047faSDanylo Vodopianov 
7996b0047faSDanylo Vodopianov 	if (!virt)
8006b0047faSDanylo Vodopianov 		return -1;
8016b0047faSDanylo Vodopianov 
8026b0047faSDanylo Vodopianov 	uint64_t gp_offset = (uint64_t)virt & ONE_G_MASK;
8036b0047faSDanylo Vodopianov 	rte_iova_t hpa = rte_malloc_virt2iova(virt);
8046b0047faSDanylo Vodopianov 
8056b0047faSDanylo Vodopianov 	NT_LOG(DBG, NTNIC, "Allocated virtio descr rings : virt "
8066b0047faSDanylo Vodopianov 		"%p [0x%" PRIX64 "],hpa %" PRIX64 " [0x%" PRIX64 "]",
8076b0047faSDanylo Vodopianov 		virt, gp_offset, hpa, hpa & ONE_G_MASK);
8086b0047faSDanylo Vodopianov 
8096b0047faSDanylo Vodopianov 	/*
8106b0047faSDanylo Vodopianov 	 * Same offset on both HPA and IOVA
8116b0047faSDanylo Vodopianov 	 * Make sure 1G boundary is never crossed
8126b0047faSDanylo Vodopianov 	 */
8136b0047faSDanylo Vodopianov 	if (((hpa & ONE_G_MASK) != gp_offset) ||
8146b0047faSDanylo Vodopianov 		(((uint64_t)virt + tot_alloc_size) & ~ONE_G_MASK) !=
8156b0047faSDanylo Vodopianov 		((uint64_t)virt & ~ONE_G_MASK)) {
8166b0047faSDanylo Vodopianov 		NT_LOG(ERR, NTNIC, "*********************************************************");
8176b0047faSDanylo Vodopianov 		NT_LOG(ERR, NTNIC, "ERROR, no optimal IOMMU mapping available hpa: %016" PRIX64
8186b0047faSDanylo Vodopianov 			"(%016" PRIX64 "), gp_offset: %016" PRIX64 " size: %" PRIu64,
8196b0047faSDanylo Vodopianov 			hpa, hpa & ONE_G_MASK, gp_offset, tot_alloc_size);
8206b0047faSDanylo Vodopianov 		NT_LOG(ERR, NTNIC, "*********************************************************");
8216b0047faSDanylo Vodopianov 
8226b0047faSDanylo Vodopianov 		rte_free(virt);
8236b0047faSDanylo Vodopianov 
8246b0047faSDanylo Vodopianov 		/* Just allocate 1MB to hold all combined descr rings */
8256b0047faSDanylo Vodopianov 		size = 0x100000;
8266b0047faSDanylo Vodopianov 		void *virt = rte_malloc_socket("VirtQDescr", size, 4096, eth_dev->data->numa_node);
8276b0047faSDanylo Vodopianov 
8286b0047faSDanylo Vodopianov 		if (!virt)
8296b0047faSDanylo Vodopianov 			return -1;
8306b0047faSDanylo Vodopianov 
8316b0047faSDanylo Vodopianov 		res = nt_vfio_dma_map(vf_num, virt, &iova_addr, size);
8326b0047faSDanylo Vodopianov 
8336b0047faSDanylo Vodopianov 		NT_LOG(DBG, NTNIC, "VFIO MMAP res %i, vf_num %i", res, vf_num);
8346b0047faSDanylo Vodopianov 
8356b0047faSDanylo Vodopianov 		if (res != 0)
8366b0047faSDanylo Vodopianov 			return -1;
8376b0047faSDanylo Vodopianov 
8386b0047faSDanylo Vodopianov 		hwq->vf_num = vf_num;
8396b0047faSDanylo Vodopianov 		hwq->virt_queues_ctrl.virt_addr = virt;
8406b0047faSDanylo Vodopianov 		hwq->virt_queues_ctrl.phys_addr = (void *)iova_addr;
8416b0047faSDanylo Vodopianov 		hwq->virt_queues_ctrl.len = size;
8426b0047faSDanylo Vodopianov 
8436b0047faSDanylo Vodopianov 		NT_LOG(DBG, NTNIC,
8446b0047faSDanylo Vodopianov 			"Allocated for virtio descr rings combined 1MB : %p, IOVA %016" PRIX64 "",
8456b0047faSDanylo Vodopianov 			virt, iova_addr);
8466b0047faSDanylo Vodopianov 
8476b0047faSDanylo Vodopianov 		size = num_descr * sizeof(struct nthw_memory_descriptor);
8486b0047faSDanylo Vodopianov 		hwq->pkt_buffers =
8496b0047faSDanylo Vodopianov 			rte_zmalloc_socket("rx_pkt_buffers", size, 64, eth_dev->data->numa_node);
8506b0047faSDanylo Vodopianov 
8516b0047faSDanylo Vodopianov 		if (!hwq->pkt_buffers) {
8526b0047faSDanylo Vodopianov 			NT_LOG(ERR, NTNIC,
8536b0047faSDanylo Vodopianov 				"Failed to allocated buffer array for hw-queue %p, total size %i, elements %i",
8546b0047faSDanylo Vodopianov 				hwq->pkt_buffers, size, num_descr);
8556b0047faSDanylo Vodopianov 			rte_free(virt);
8566b0047faSDanylo Vodopianov 			return -1;
8576b0047faSDanylo Vodopianov 		}
8586b0047faSDanylo Vodopianov 
8596b0047faSDanylo Vodopianov 		size = buf_size * num_descr;
8606b0047faSDanylo Vodopianov 		void *virt_addr =
8616b0047faSDanylo Vodopianov 			rte_malloc_socket("pkt_buffer_pkts", size, 4096, eth_dev->data->numa_node);
8626b0047faSDanylo Vodopianov 
8636b0047faSDanylo Vodopianov 		if (!virt_addr) {
8646b0047faSDanylo Vodopianov 			NT_LOG(ERR, NTNIC,
8656b0047faSDanylo Vodopianov 				"Failed allocate packet buffers for hw-queue %p, buf size %i, elements %i",
8666b0047faSDanylo Vodopianov 				hwq->pkt_buffers, buf_size, num_descr);
8676b0047faSDanylo Vodopianov 			rte_free(hwq->pkt_buffers);
8686b0047faSDanylo Vodopianov 			rte_free(virt);
8696b0047faSDanylo Vodopianov 			return -1;
8706b0047faSDanylo Vodopianov 		}
8716b0047faSDanylo Vodopianov 
8726b0047faSDanylo Vodopianov 		res = nt_vfio_dma_map(vf_num, virt_addr, &iova_addr, size);
8736b0047faSDanylo Vodopianov 
8746b0047faSDanylo Vodopianov 		NT_LOG(DBG, NTNIC,
8756b0047faSDanylo Vodopianov 			"VFIO MMAP res %i, virt %p, iova %016" PRIX64 ", vf_num %i, num pkt bufs %i, tot size %i",
8766b0047faSDanylo Vodopianov 			res, virt_addr, iova_addr, vf_num, num_descr, size);
8776b0047faSDanylo Vodopianov 
8786b0047faSDanylo Vodopianov 		if (res != 0)
8796b0047faSDanylo Vodopianov 			return -1;
8806b0047faSDanylo Vodopianov 
8816b0047faSDanylo Vodopianov 		for (i = 0; i < num_descr; i++) {
8826b0047faSDanylo Vodopianov 			hwq->pkt_buffers[i].virt_addr =
8836b0047faSDanylo Vodopianov 				(void *)((char *)virt_addr + ((uint64_t)(i) * buf_size));
8846b0047faSDanylo Vodopianov 			hwq->pkt_buffers[i].phys_addr =
8856b0047faSDanylo Vodopianov 				(void *)(iova_addr + ((uint64_t)(i) * buf_size));
8866b0047faSDanylo Vodopianov 			hwq->pkt_buffers[i].len = buf_size;
8876b0047faSDanylo Vodopianov 		}
8886b0047faSDanylo Vodopianov 
8896b0047faSDanylo Vodopianov 		return 0;
8906b0047faSDanylo Vodopianov 	}	/* End of: no optimal IOMMU mapping available */
8916b0047faSDanylo Vodopianov 
8926b0047faSDanylo Vodopianov 	res = nt_vfio_dma_map(vf_num, virt, &iova_addr, ONE_G_SIZE);
8936b0047faSDanylo Vodopianov 
8946b0047faSDanylo Vodopianov 	if (res != 0) {
8956b0047faSDanylo Vodopianov 		NT_LOG(ERR, NTNIC, "VFIO MMAP FAILED! res %i, vf_num %i", res, vf_num);
8966b0047faSDanylo Vodopianov 		return -1;
8976b0047faSDanylo Vodopianov 	}
8986b0047faSDanylo Vodopianov 
8996b0047faSDanylo Vodopianov 	hwq->vf_num = vf_num;
9006b0047faSDanylo Vodopianov 	hwq->virt_queues_ctrl.virt_addr = virt;
9016b0047faSDanylo Vodopianov 	hwq->virt_queues_ctrl.phys_addr = (void *)(iova_addr);
9026b0047faSDanylo Vodopianov 	hwq->virt_queues_ctrl.len = 0x100000;
9036b0047faSDanylo Vodopianov 	iova_addr += 0x100000;
9046b0047faSDanylo Vodopianov 
9056b0047faSDanylo Vodopianov 	NT_LOG(DBG, NTNIC,
9066b0047faSDanylo Vodopianov 		"VFIO MMAP: virt_addr=%p phys_addr=%p size=%" PRIX32 " hpa=%" PRIX64 "",
9076b0047faSDanylo Vodopianov 		hwq->virt_queues_ctrl.virt_addr, hwq->virt_queues_ctrl.phys_addr,
9086b0047faSDanylo Vodopianov 		hwq->virt_queues_ctrl.len, rte_malloc_virt2iova(hwq->virt_queues_ctrl.virt_addr));
9096b0047faSDanylo Vodopianov 
9106b0047faSDanylo Vodopianov 	size = num_descr * sizeof(struct nthw_memory_descriptor);
9116b0047faSDanylo Vodopianov 	hwq->pkt_buffers =
9126b0047faSDanylo Vodopianov 		rte_zmalloc_socket("rx_pkt_buffers", size, 64, eth_dev->data->numa_node);
9136b0047faSDanylo Vodopianov 
9146b0047faSDanylo Vodopianov 	if (!hwq->pkt_buffers) {
9156b0047faSDanylo Vodopianov 		NT_LOG(ERR, NTNIC,
9166b0047faSDanylo Vodopianov 			"Failed to allocated buffer array for hw-queue %p, total size %i, elements %i",
9176b0047faSDanylo Vodopianov 			hwq->pkt_buffers, size, num_descr);
9186b0047faSDanylo Vodopianov 		rte_free(virt);
9196b0047faSDanylo Vodopianov 		return -1;
9206b0047faSDanylo Vodopianov 	}
9216b0047faSDanylo Vodopianov 
9226b0047faSDanylo Vodopianov 	void *virt_addr = (void *)((uint64_t)virt + 0x100000);
9236b0047faSDanylo Vodopianov 
9246b0047faSDanylo Vodopianov 	for (i = 0; i < num_descr; i++) {
9256b0047faSDanylo Vodopianov 		hwq->pkt_buffers[i].virt_addr =
9266b0047faSDanylo Vodopianov 			(void *)((char *)virt_addr + ((uint64_t)(i) * buf_size));
9276b0047faSDanylo Vodopianov 		hwq->pkt_buffers[i].phys_addr = (void *)(iova_addr + ((uint64_t)(i) * buf_size));
9286b0047faSDanylo Vodopianov 		hwq->pkt_buffers[i].len = buf_size;
9296b0047faSDanylo Vodopianov 	}
9306b0047faSDanylo Vodopianov 
9316b0047faSDanylo Vodopianov 	return 0;
9326b0047faSDanylo Vodopianov }
9336b0047faSDanylo Vodopianov 
934b0cd36e9SDanylo Vodopianov static void release_hw_virtio_queues(struct hwq_s *hwq)
935b0cd36e9SDanylo Vodopianov {
936b0cd36e9SDanylo Vodopianov 	if (!hwq || hwq->vf_num == 0)
937b0cd36e9SDanylo Vodopianov 		return;
938b0cd36e9SDanylo Vodopianov 
939b0cd36e9SDanylo Vodopianov 	hwq->vf_num = 0;
940b0cd36e9SDanylo Vodopianov }
941b0cd36e9SDanylo Vodopianov 
9425284180aSDanylo Vodopianov static int deallocate_hw_virtio_queues(struct hwq_s *hwq)
9435284180aSDanylo Vodopianov {
9445284180aSDanylo Vodopianov 	int vf_num = hwq->vf_num;
9455284180aSDanylo Vodopianov 
9465284180aSDanylo Vodopianov 	void *virt = hwq->virt_queues_ctrl.virt_addr;
9475284180aSDanylo Vodopianov 
9485284180aSDanylo Vodopianov 	int res = nt_vfio_dma_unmap(vf_num, hwq->virt_queues_ctrl.virt_addr,
9495284180aSDanylo Vodopianov 			(uint64_t)hwq->virt_queues_ctrl.phys_addr, ONE_G_SIZE);
9505284180aSDanylo Vodopianov 
9515284180aSDanylo Vodopianov 	if (res != 0) {
9525284180aSDanylo Vodopianov 		NT_LOG(ERR, NTNIC, "VFIO UNMMAP FAILED! res %i, vf_num %i", res, vf_num);
9535284180aSDanylo Vodopianov 		return -1;
9545284180aSDanylo Vodopianov 	}
9555284180aSDanylo Vodopianov 
9565284180aSDanylo Vodopianov 	release_hw_virtio_queues(hwq);
9575284180aSDanylo Vodopianov 	rte_free(hwq->pkt_buffers);
9585284180aSDanylo Vodopianov 	rte_free(virt);
9595284180aSDanylo Vodopianov 	return 0;
9605284180aSDanylo Vodopianov }
9615284180aSDanylo Vodopianov 
962fe91ade9SDanylo Vodopianov static void eth_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t queue_id)
963fe91ade9SDanylo Vodopianov {
964e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *internals = eth_dev->data->dev_private;
9655284180aSDanylo Vodopianov 	struct ntnic_tx_queue *tx_q = &internals->txq_scg[queue_id];
9665284180aSDanylo Vodopianov 	deallocate_hw_virtio_queues(&tx_q->hwq);
967fe91ade9SDanylo Vodopianov }
968fe91ade9SDanylo Vodopianov 
969fe91ade9SDanylo Vodopianov static void eth_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t queue_id)
970fe91ade9SDanylo Vodopianov {
971e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *internals = eth_dev->data->dev_private;
9725284180aSDanylo Vodopianov 	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[queue_id];
9735284180aSDanylo Vodopianov 	deallocate_hw_virtio_queues(&rx_q->hwq);
974fe91ade9SDanylo Vodopianov }
975fe91ade9SDanylo Vodopianov 
976fe91ade9SDanylo Vodopianov static int num_queues_alloced;
977fe91ade9SDanylo Vodopianov 
978fe91ade9SDanylo Vodopianov /* Returns num queue starting at returned queue num or -1 on fail */
979fe91ade9SDanylo Vodopianov static int allocate_queue(int num)
980fe91ade9SDanylo Vodopianov {
981fe91ade9SDanylo Vodopianov 	int next_free = num_queues_alloced;
982fe91ade9SDanylo Vodopianov 	NT_LOG_DBGX(DBG, NTNIC, "num_queues_alloced=%u, New queues=%u, Max queues=%u",
983fe91ade9SDanylo Vodopianov 		num_queues_alloced, num, MAX_TOTAL_QUEUES);
984fe91ade9SDanylo Vodopianov 
985fe91ade9SDanylo Vodopianov 	if (num_queues_alloced + num > MAX_TOTAL_QUEUES)
986fe91ade9SDanylo Vodopianov 		return -1;
987fe91ade9SDanylo Vodopianov 
988fe91ade9SDanylo Vodopianov 	num_queues_alloced += num;
989fe91ade9SDanylo Vodopianov 	return next_free;
990fe91ade9SDanylo Vodopianov }
991fe91ade9SDanylo Vodopianov 
9926b0047faSDanylo Vodopianov static int eth_rx_scg_queue_setup(struct rte_eth_dev *eth_dev,
9936b0047faSDanylo Vodopianov 	uint16_t rx_queue_id,
9946b0047faSDanylo Vodopianov 	uint16_t nb_rx_desc __rte_unused,
9956b0047faSDanylo Vodopianov 	unsigned int socket_id __rte_unused,
9966b0047faSDanylo Vodopianov 	const struct rte_eth_rxconf *rx_conf __rte_unused,
9976b0047faSDanylo Vodopianov 	struct rte_mempool *mb_pool)
9986b0047faSDanylo Vodopianov {
9996b0047faSDanylo Vodopianov 	NT_LOG_DBGX(DBG, NTNIC, "Rx queue setup");
10006b0047faSDanylo Vodopianov 	struct rte_pktmbuf_pool_private *mbp_priv;
1001e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *internals = eth_dev->data->dev_private;
10026b0047faSDanylo Vodopianov 	struct ntnic_rx_queue *rx_q = &internals->rxq_scg[rx_queue_id];
10036b0047faSDanylo Vodopianov 	struct drv_s *p_drv = internals->p_drv;
10046b0047faSDanylo Vodopianov 	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
10056b0047faSDanylo Vodopianov 
10066b0047faSDanylo Vodopianov 	if (sg_ops == NULL) {
10076b0047faSDanylo Vodopianov 		NT_LOG_DBGX(DBG, NTNIC, "SG module is not initialized");
10086b0047faSDanylo Vodopianov 		return 0;
10096b0047faSDanylo Vodopianov 	}
10106b0047faSDanylo Vodopianov 
10116b0047faSDanylo Vodopianov 	if (internals->type == PORT_TYPE_OVERRIDE) {
10126b0047faSDanylo Vodopianov 		rx_q->mb_pool = mb_pool;
10136b0047faSDanylo Vodopianov 		eth_dev->data->rx_queues[rx_queue_id] = rx_q;
10146b0047faSDanylo Vodopianov 		mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
10156b0047faSDanylo Vodopianov 		rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
10166b0047faSDanylo Vodopianov 		rx_q->enabled = 1;
10176b0047faSDanylo Vodopianov 		return 0;
10186b0047faSDanylo Vodopianov 	}
10196b0047faSDanylo Vodopianov 
10206b0047faSDanylo Vodopianov 	NT_LOG(DBG, NTNIC, "(%i) NTNIC RX OVS-SW queue setup: queue id %i, hw queue index %i",
10216b0047faSDanylo Vodopianov 		internals->port, rx_queue_id, rx_q->queue.hw_id);
10226b0047faSDanylo Vodopianov 
10236b0047faSDanylo Vodopianov 	rx_q->mb_pool = mb_pool;
10246b0047faSDanylo Vodopianov 
10256b0047faSDanylo Vodopianov 	eth_dev->data->rx_queues[rx_queue_id] = rx_q;
10266b0047faSDanylo Vodopianov 
10276b0047faSDanylo Vodopianov 	mbp_priv = rte_mempool_get_priv(rx_q->mb_pool);
10286b0047faSDanylo Vodopianov 	rx_q->buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
10296b0047faSDanylo Vodopianov 	rx_q->enabled = 1;
10306b0047faSDanylo Vodopianov 
10316b0047faSDanylo Vodopianov 	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &rx_q->hwq,
10326b0047faSDanylo Vodopianov 			SG_NB_HW_RX_DESCRIPTORS, SG_HW_RX_PKT_BUFFER_SIZE) < 0)
10336b0047faSDanylo Vodopianov 		return -1;
10346b0047faSDanylo Vodopianov 
10356b0047faSDanylo Vodopianov 	rx_q->nb_hw_rx_descr = SG_NB_HW_RX_DESCRIPTORS;
10366b0047faSDanylo Vodopianov 
10376b0047faSDanylo Vodopianov 	rx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
10386b0047faSDanylo Vodopianov 
10396b0047faSDanylo Vodopianov 	rx_q->vq =
10406b0047faSDanylo Vodopianov 		sg_ops->nthw_setup_mngd_rx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
10416b0047faSDanylo Vodopianov 			rx_q->queue.hw_id,	/* index */
10426b0047faSDanylo Vodopianov 			rx_q->nb_hw_rx_descr,
10436b0047faSDanylo Vodopianov 			EXCEPTION_PATH_HID,	/* host_id */
10446b0047faSDanylo Vodopianov 			1,	/* header NT DVIO header for exception path */
10456b0047faSDanylo Vodopianov 			&rx_q->hwq.virt_queues_ctrl,
10466b0047faSDanylo Vodopianov 			rx_q->hwq.pkt_buffers,
10476b0047faSDanylo Vodopianov 			SPLIT_RING,
10486b0047faSDanylo Vodopianov 			-1);
10496b0047faSDanylo Vodopianov 
10506b0047faSDanylo Vodopianov 	NT_LOG(DBG, NTNIC, "(%i) NTNIC RX OVS-SW queues successfully setup", internals->port);
10516b0047faSDanylo Vodopianov 
10526b0047faSDanylo Vodopianov 	return 0;
10536b0047faSDanylo Vodopianov }
10546b0047faSDanylo Vodopianov 
10556b0047faSDanylo Vodopianov static int eth_tx_scg_queue_setup(struct rte_eth_dev *eth_dev,
10566b0047faSDanylo Vodopianov 	uint16_t tx_queue_id,
10576b0047faSDanylo Vodopianov 	uint16_t nb_tx_desc __rte_unused,
10586b0047faSDanylo Vodopianov 	unsigned int socket_id __rte_unused,
10596b0047faSDanylo Vodopianov 	const struct rte_eth_txconf *tx_conf __rte_unused)
10606b0047faSDanylo Vodopianov {
10616b0047faSDanylo Vodopianov 	const struct port_ops *port_ops = get_port_ops();
10626b0047faSDanylo Vodopianov 
10636b0047faSDanylo Vodopianov 	if (port_ops == NULL) {
10646b0047faSDanylo Vodopianov 		NT_LOG_DBGX(ERR, NTNIC, "Link management module uninitialized");
10656b0047faSDanylo Vodopianov 		return -1;
10666b0047faSDanylo Vodopianov 	}
10676b0047faSDanylo Vodopianov 
10686b0047faSDanylo Vodopianov 	NT_LOG_DBGX(DBG, NTNIC, "Tx queue setup");
1069e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *internals = eth_dev->data->dev_private;
10706b0047faSDanylo Vodopianov 	struct drv_s *p_drv = internals->p_drv;
10716b0047faSDanylo Vodopianov 	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
10726b0047faSDanylo Vodopianov 	struct ntnic_tx_queue *tx_q = &internals->txq_scg[tx_queue_id];
10736b0047faSDanylo Vodopianov 
10746b0047faSDanylo Vodopianov 	if (internals->type == PORT_TYPE_OVERRIDE) {
10756b0047faSDanylo Vodopianov 		eth_dev->data->tx_queues[tx_queue_id] = tx_q;
10766b0047faSDanylo Vodopianov 		return 0;
10776b0047faSDanylo Vodopianov 	}
10786b0047faSDanylo Vodopianov 
10796b0047faSDanylo Vodopianov 	if (sg_ops == NULL) {
10806b0047faSDanylo Vodopianov 		NT_LOG_DBGX(DBG, NTNIC, "SG module is not initialized");
10816b0047faSDanylo Vodopianov 		return 0;
10826b0047faSDanylo Vodopianov 	}
10836b0047faSDanylo Vodopianov 
10846b0047faSDanylo Vodopianov 	NT_LOG(DBG, NTNIC, "(%i) NTNIC TX OVS-SW queue setup: queue id %i, hw queue index %i",
10856b0047faSDanylo Vodopianov 		tx_q->port, tx_queue_id, tx_q->queue.hw_id);
10866b0047faSDanylo Vodopianov 
10876b0047faSDanylo Vodopianov 	if (tx_queue_id > internals->nb_tx_queues) {
10886b0047faSDanylo Vodopianov 		NT_LOG(ERR, NTNIC, "Error invalid tx queue id");
10896b0047faSDanylo Vodopianov 		return -1;
10906b0047faSDanylo Vodopianov 	}
10916b0047faSDanylo Vodopianov 
10926b0047faSDanylo Vodopianov 	eth_dev->data->tx_queues[tx_queue_id] = tx_q;
10936b0047faSDanylo Vodopianov 
10946b0047faSDanylo Vodopianov 	/* Calculate target ID for HW  - to be used in NTDVIO0 header bypass_port */
10956b0047faSDanylo Vodopianov 	if (tx_q->rss_target_id >= 0) {
10966b0047faSDanylo Vodopianov 		/* bypass to a multiqueue port - qsl-hsh index */
10976b0047faSDanylo Vodopianov 		tx_q->target_id = tx_q->rss_target_id + 0x90;
10986b0047faSDanylo Vodopianov 
10996b0047faSDanylo Vodopianov 	} else if (internals->vpq[tx_queue_id].hw_id > -1) {
11006b0047faSDanylo Vodopianov 		/* virtual port - queue index */
11016b0047faSDanylo Vodopianov 		tx_q->target_id = internals->vpq[tx_queue_id].hw_id;
11026b0047faSDanylo Vodopianov 
11036b0047faSDanylo Vodopianov 	} else {
11046b0047faSDanylo Vodopianov 		/* Phy port - phy port identifier */
11056b0047faSDanylo Vodopianov 		/* output/bypass to MAC */
11066b0047faSDanylo Vodopianov 		tx_q->target_id = (int)(tx_q->port + 0x80);
11076b0047faSDanylo Vodopianov 	}
11086b0047faSDanylo Vodopianov 
11096b0047faSDanylo Vodopianov 	if (allocate_hw_virtio_queues(eth_dev, EXCEPTION_PATH_HID, &tx_q->hwq,
11106b0047faSDanylo Vodopianov 			SG_NB_HW_TX_DESCRIPTORS, SG_HW_TX_PKT_BUFFER_SIZE) < 0) {
11116b0047faSDanylo Vodopianov 		return -1;
11126b0047faSDanylo Vodopianov 	}
11136b0047faSDanylo Vodopianov 
11146b0047faSDanylo Vodopianov 	tx_q->nb_hw_tx_descr = SG_NB_HW_TX_DESCRIPTORS;
11156b0047faSDanylo Vodopianov 
11166b0047faSDanylo Vodopianov 	tx_q->profile = p_drv->ntdrv.adapter_info.fpga_info.profile;
11176b0047faSDanylo Vodopianov 
11186b0047faSDanylo Vodopianov 	uint32_t port, header;
11196b0047faSDanylo Vodopianov 	port = tx_q->port;	/* transmit port */
11206b0047faSDanylo Vodopianov 	header = 0;	/* header type VirtIO-Net */
11216b0047faSDanylo Vodopianov 
11226b0047faSDanylo Vodopianov 	tx_q->vq =
11236b0047faSDanylo Vodopianov 		sg_ops->nthw_setup_mngd_tx_virt_queue(p_nt_drv->adapter_info.fpga_info.mp_nthw_dbs,
11246b0047faSDanylo Vodopianov 			tx_q->queue.hw_id,	/* index */
11256b0047faSDanylo Vodopianov 			tx_q->nb_hw_tx_descr,	/* queue size */
11266b0047faSDanylo Vodopianov 			EXCEPTION_PATH_HID,	/* host_id always VF4 */
11276b0047faSDanylo Vodopianov 			port,
11286b0047faSDanylo Vodopianov 			/*
11296b0047faSDanylo Vodopianov 			 * in_port - in vswitch mode has
11306b0047faSDanylo Vodopianov 			 * to move tx port from OVS excep.
11316b0047faSDanylo Vodopianov 			 * away from VM tx port,
11326b0047faSDanylo Vodopianov 			 * because of QoS is matched by port id!
11336b0047faSDanylo Vodopianov 			 */
11346b0047faSDanylo Vodopianov 			tx_q->port + 128,
11356b0047faSDanylo Vodopianov 			header,
11366b0047faSDanylo Vodopianov 			&tx_q->hwq.virt_queues_ctrl,
11376b0047faSDanylo Vodopianov 			tx_q->hwq.pkt_buffers,
11386b0047faSDanylo Vodopianov 			SPLIT_RING,
11396b0047faSDanylo Vodopianov 			-1,
11406b0047faSDanylo Vodopianov 			IN_ORDER);
11416b0047faSDanylo Vodopianov 
11426b0047faSDanylo Vodopianov 	tx_q->enabled = 1;
11436b0047faSDanylo Vodopianov 
11446b0047faSDanylo Vodopianov 	NT_LOG(DBG, NTNIC, "(%i) NTNIC TX OVS-SW queues successfully setup", internals->port);
11456b0047faSDanylo Vodopianov 
11466b0047faSDanylo Vodopianov 	if (internals->type == PORT_TYPE_PHYSICAL) {
11476b0047faSDanylo Vodopianov 		struct adapter_info_s *p_adapter_info = &internals->p_drv->ntdrv.adapter_info;
11486b0047faSDanylo Vodopianov 		NT_LOG(DBG, NTNIC, "Port %i is ready for data. Enable port",
11496b0047faSDanylo Vodopianov 			internals->n_intf_no);
11506b0047faSDanylo Vodopianov 		port_ops->set_adm_state(p_adapter_info, internals->n_intf_no, true);
11516b0047faSDanylo Vodopianov 	}
11526b0047faSDanylo Vodopianov 
11536b0047faSDanylo Vodopianov 	return 0;
11546b0047faSDanylo Vodopianov }
11556b0047faSDanylo Vodopianov 
1156*6019656dSOleksandr Kolomeiets static int dev_set_mtu_inline(struct rte_eth_dev *eth_dev, uint16_t mtu)
1157*6019656dSOleksandr Kolomeiets {
1158*6019656dSOleksandr Kolomeiets 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
1159*6019656dSOleksandr Kolomeiets 
1160*6019656dSOleksandr Kolomeiets 	if (profile_inline_ops == NULL) {
1161*6019656dSOleksandr Kolomeiets 		NT_LOG_DBGX(ERR, NTNIC, "profile_inline module uninitialized");
1162*6019656dSOleksandr Kolomeiets 		return -1;
1163*6019656dSOleksandr Kolomeiets 	}
1164*6019656dSOleksandr Kolomeiets 
1165*6019656dSOleksandr Kolomeiets 	struct pmd_internals *internals = (struct pmd_internals *)eth_dev->data->dev_private;
1166*6019656dSOleksandr Kolomeiets 
1167*6019656dSOleksandr Kolomeiets 	struct flow_eth_dev *flw_dev = internals->flw_dev;
1168*6019656dSOleksandr Kolomeiets 	int ret = -1;
1169*6019656dSOleksandr Kolomeiets 
1170*6019656dSOleksandr Kolomeiets 	if (internals->type == PORT_TYPE_PHYSICAL && mtu >= MIN_MTU_INLINE && mtu <= MAX_MTU)
1171*6019656dSOleksandr Kolomeiets 		ret = profile_inline_ops->flow_set_mtu_inline(flw_dev, internals->port, mtu);
1172*6019656dSOleksandr Kolomeiets 
1173*6019656dSOleksandr Kolomeiets 	return ret ? -EINVAL : 0;
1174*6019656dSOleksandr Kolomeiets }
1175*6019656dSOleksandr Kolomeiets 
1176fe91ade9SDanylo Vodopianov static int eth_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
1177fe91ade9SDanylo Vodopianov {
1178fe91ade9SDanylo Vodopianov 	eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1179fe91ade9SDanylo Vodopianov 	return 0;
1180fe91ade9SDanylo Vodopianov }
1181fe91ade9SDanylo Vodopianov 
1182fe91ade9SDanylo Vodopianov static int eth_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
1183fe91ade9SDanylo Vodopianov {
1184fe91ade9SDanylo Vodopianov 	eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1185fe91ade9SDanylo Vodopianov 	return 0;
1186fe91ade9SDanylo Vodopianov }
1187fe91ade9SDanylo Vodopianov 
1188fe91ade9SDanylo Vodopianov static int eth_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
1189fe91ade9SDanylo Vodopianov {
1190fe91ade9SDanylo Vodopianov 	eth_dev->data->tx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1191fe91ade9SDanylo Vodopianov 	return 0;
1192fe91ade9SDanylo Vodopianov }
1193fe91ade9SDanylo Vodopianov 
1194fe91ade9SDanylo Vodopianov static int eth_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
1195fe91ade9SDanylo Vodopianov {
1196fe91ade9SDanylo Vodopianov 	eth_dev->data->tx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1197fe91ade9SDanylo Vodopianov 	return 0;
1198fe91ade9SDanylo Vodopianov }
1199fe91ade9SDanylo Vodopianov 
12009147e9f9SSerhii Iliushyk static int
12019147e9f9SSerhii Iliushyk eth_mac_addr_add(struct rte_eth_dev *eth_dev,
12029147e9f9SSerhii Iliushyk 	struct rte_ether_addr *mac_addr,
12039147e9f9SSerhii Iliushyk 	uint32_t index,
12049147e9f9SSerhii Iliushyk 	uint32_t vmdq __rte_unused)
12059147e9f9SSerhii Iliushyk {
12069147e9f9SSerhii Iliushyk 	struct rte_ether_addr *const eth_addrs = eth_dev->data->mac_addrs;
12079147e9f9SSerhii Iliushyk 
12089147e9f9SSerhii Iliushyk 	assert(index < NUM_MAC_ADDRS_PER_PORT);
12099147e9f9SSerhii Iliushyk 
12109147e9f9SSerhii Iliushyk 	if (index >= NUM_MAC_ADDRS_PER_PORT) {
12119147e9f9SSerhii Iliushyk 		const struct pmd_internals *const internals =
1212e0d9b3cdSSerhii Iliushyk 			eth_dev->data->dev_private;
12133489b87bSDanylo Vodopianov 		NT_LOG_DBGX(DBG, NTNIC, "Port %i: illegal index %u (>= %u)",
12149147e9f9SSerhii Iliushyk 			internals->n_intf_no, index, NUM_MAC_ADDRS_PER_PORT);
12159147e9f9SSerhii Iliushyk 		return -1;
12169147e9f9SSerhii Iliushyk 	}
12179147e9f9SSerhii Iliushyk 
12189147e9f9SSerhii Iliushyk 	eth_addrs[index] = *mac_addr;
12199147e9f9SSerhii Iliushyk 
12209147e9f9SSerhii Iliushyk 	return 0;
12219147e9f9SSerhii Iliushyk }
12229147e9f9SSerhii Iliushyk 
12239147e9f9SSerhii Iliushyk static int
12249147e9f9SSerhii Iliushyk eth_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
12259147e9f9SSerhii Iliushyk {
12269147e9f9SSerhii Iliushyk 	struct rte_ether_addr *const eth_addrs = dev->data->mac_addrs;
12279147e9f9SSerhii Iliushyk 
12289147e9f9SSerhii Iliushyk 	eth_addrs[0U] = *mac_addr;
12299147e9f9SSerhii Iliushyk 
12309147e9f9SSerhii Iliushyk 	return 0;
12319147e9f9SSerhii Iliushyk }
12329147e9f9SSerhii Iliushyk 
12339147e9f9SSerhii Iliushyk static int
12349147e9f9SSerhii Iliushyk eth_set_mc_addr_list(struct rte_eth_dev *eth_dev,
12359147e9f9SSerhii Iliushyk 	struct rte_ether_addr *mc_addr_set,
12369147e9f9SSerhii Iliushyk 	uint32_t nb_mc_addr)
12379147e9f9SSerhii Iliushyk {
1238e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *const internals = eth_dev->data->dev_private;
12399147e9f9SSerhii Iliushyk 	struct rte_ether_addr *const mc_addrs = internals->mc_addrs;
12409147e9f9SSerhii Iliushyk 	size_t i;
12419147e9f9SSerhii Iliushyk 
12429147e9f9SSerhii Iliushyk 	if (nb_mc_addr >= NUM_MULTICAST_ADDRS_PER_PORT) {
12433489b87bSDanylo Vodopianov 		NT_LOG_DBGX(DBG, NTNIC,
12443489b87bSDanylo Vodopianov 			"Port %i: too many multicast addresses %u (>= %u)",
12459147e9f9SSerhii Iliushyk 			internals->n_intf_no, nb_mc_addr, NUM_MULTICAST_ADDRS_PER_PORT);
12469147e9f9SSerhii Iliushyk 		return -1;
12479147e9f9SSerhii Iliushyk 	}
12489147e9f9SSerhii Iliushyk 
12499147e9f9SSerhii Iliushyk 	for (i = 0U; i < NUM_MULTICAST_ADDRS_PER_PORT; i++)
12509147e9f9SSerhii Iliushyk 		if (i < nb_mc_addr)
12519147e9f9SSerhii Iliushyk 			mc_addrs[i] = mc_addr_set[i];
12529147e9f9SSerhii Iliushyk 
12539147e9f9SSerhii Iliushyk 		else
12549147e9f9SSerhii Iliushyk 			(void)memset(&mc_addrs[i], 0, sizeof(mc_addrs[i]));
1255c93ef6edSSerhii Iliushyk 
1256c93ef6edSSerhii Iliushyk 	return 0;
1257c93ef6edSSerhii Iliushyk }
1258c93ef6edSSerhii Iliushyk 
1259c93ef6edSSerhii Iliushyk static int
1260c93ef6edSSerhii Iliushyk eth_dev_configure(struct rte_eth_dev *eth_dev)
1261c93ef6edSSerhii Iliushyk {
12623489b87bSDanylo Vodopianov 	NT_LOG_DBGX(DBG, NTNIC, "Called for eth_dev %p", eth_dev);
1263c93ef6edSSerhii Iliushyk 
1264c93ef6edSSerhii Iliushyk 	/* The device is ALWAYS running promiscuous mode. */
1265c93ef6edSSerhii Iliushyk 	eth_dev->data->promiscuous ^= ~eth_dev->data->promiscuous;
1266c93ef6edSSerhii Iliushyk 	return 0;
1267c93ef6edSSerhii Iliushyk }
1268c93ef6edSSerhii Iliushyk 
1269c93ef6edSSerhii Iliushyk static int
1270c93ef6edSSerhii Iliushyk eth_dev_start(struct rte_eth_dev *eth_dev)
1271c93ef6edSSerhii Iliushyk {
12729147e9f9SSerhii Iliushyk 	const struct port_ops *port_ops = get_port_ops();
12739147e9f9SSerhii Iliushyk 
12749147e9f9SSerhii Iliushyk 	if (port_ops == NULL) {
12753489b87bSDanylo Vodopianov 		NT_LOG(ERR, NTNIC, "Link management module uninitialized");
12769147e9f9SSerhii Iliushyk 		return -1;
12779147e9f9SSerhii Iliushyk 	}
12789147e9f9SSerhii Iliushyk 
127987b3bb06SDanylo Vodopianov 	eth_dev->flow_fp_ops = get_dev_fp_flow_ops();
1280e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *internals = eth_dev->data->dev_private;
1281c93ef6edSSerhii Iliushyk 
12829147e9f9SSerhii Iliushyk 	const int n_intf_no = internals->n_intf_no;
12839147e9f9SSerhii Iliushyk 	struct adapter_info_s *p_adapter_info = &internals->p_drv->ntdrv.adapter_info;
12849147e9f9SSerhii Iliushyk 
12853489b87bSDanylo Vodopianov 	NT_LOG_DBGX(DBG, NTNIC, "Port %u", internals->n_intf_no);
1286c93ef6edSSerhii Iliushyk 
1287fe91ade9SDanylo Vodopianov 	/* Start queues */
1288fe91ade9SDanylo Vodopianov 	uint q;
1289fe91ade9SDanylo Vodopianov 
1290fe91ade9SDanylo Vodopianov 	for (q = 0; q < internals->nb_rx_queues; q++)
1291fe91ade9SDanylo Vodopianov 		eth_rx_queue_start(eth_dev, q);
1292fe91ade9SDanylo Vodopianov 
1293fe91ade9SDanylo Vodopianov 	for (q = 0; q < internals->nb_tx_queues; q++)
1294fe91ade9SDanylo Vodopianov 		eth_tx_queue_start(eth_dev, q);
1295fe91ade9SDanylo Vodopianov 
12969147e9f9SSerhii Iliushyk 	if (internals->type == PORT_TYPE_VIRTUAL || internals->type == PORT_TYPE_OVERRIDE) {
12979147e9f9SSerhii Iliushyk 		eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
12989147e9f9SSerhii Iliushyk 
12999147e9f9SSerhii Iliushyk 	} else {
13009147e9f9SSerhii Iliushyk 		/* Enable the port */
13019147e9f9SSerhii Iliushyk 		port_ops->set_adm_state(p_adapter_info, internals->n_intf_no, true);
13029147e9f9SSerhii Iliushyk 
13039147e9f9SSerhii Iliushyk 		/*
13049147e9f9SSerhii Iliushyk 		 * wait for link on port
13059147e9f9SSerhii Iliushyk 		 * If application starts sending too soon before FPGA port is ready, garbage is
13069147e9f9SSerhii Iliushyk 		 * produced
13079147e9f9SSerhii Iliushyk 		 */
13089147e9f9SSerhii Iliushyk 		int loop = 0;
13099147e9f9SSerhii Iliushyk 
13109147e9f9SSerhii Iliushyk 		while (port_ops->get_link_status(p_adapter_info, n_intf_no) == RTE_ETH_LINK_DOWN) {
13119147e9f9SSerhii Iliushyk 			/* break out after 5 sec */
13129147e9f9SSerhii Iliushyk 			if (++loop >= 50) {
13133489b87bSDanylo Vodopianov 				NT_LOG_DBGX(DBG, NTNIC,
13143489b87bSDanylo Vodopianov 					"TIMEOUT No link on port %i (5sec timeout)",
13159147e9f9SSerhii Iliushyk 					internals->n_intf_no);
13169147e9f9SSerhii Iliushyk 				break;
13179147e9f9SSerhii Iliushyk 			}
13189147e9f9SSerhii Iliushyk 
13199147e9f9SSerhii Iliushyk 			nt_os_wait_usec(100 * 1000);
13209147e9f9SSerhii Iliushyk 		}
13219147e9f9SSerhii Iliushyk 
13229147e9f9SSerhii Iliushyk 		if (internals->lpbk_mode) {
13239147e9f9SSerhii Iliushyk 			if (internals->lpbk_mode & 1 << 0) {
13249147e9f9SSerhii Iliushyk 				port_ops->set_loopback_mode(p_adapter_info, n_intf_no,
13259147e9f9SSerhii Iliushyk 					NT_LINK_LOOPBACK_HOST);
13269147e9f9SSerhii Iliushyk 			}
13279147e9f9SSerhii Iliushyk 
13289147e9f9SSerhii Iliushyk 			if (internals->lpbk_mode & 1 << 1) {
13299147e9f9SSerhii Iliushyk 				port_ops->set_loopback_mode(p_adapter_info, n_intf_no,
13309147e9f9SSerhii Iliushyk 					NT_LINK_LOOPBACK_LINE);
13319147e9f9SSerhii Iliushyk 			}
13329147e9f9SSerhii Iliushyk 		}
13339147e9f9SSerhii Iliushyk 	}
13349147e9f9SSerhii Iliushyk 
1335c93ef6edSSerhii Iliushyk 	return 0;
1336c93ef6edSSerhii Iliushyk }
1337c93ef6edSSerhii Iliushyk 
1338c93ef6edSSerhii Iliushyk static int
1339c93ef6edSSerhii Iliushyk eth_dev_stop(struct rte_eth_dev *eth_dev)
1340c93ef6edSSerhii Iliushyk {
1341e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *internals = eth_dev->data->dev_private;
1342c93ef6edSSerhii Iliushyk 
13433489b87bSDanylo Vodopianov 	NT_LOG_DBGX(DBG, NTNIC, "Port %u", internals->n_intf_no);
1344c93ef6edSSerhii Iliushyk 
1345fe91ade9SDanylo Vodopianov 	if (internals->type != PORT_TYPE_VIRTUAL) {
1346fe91ade9SDanylo Vodopianov 		uint q;
1347fe91ade9SDanylo Vodopianov 
1348fe91ade9SDanylo Vodopianov 		for (q = 0; q < internals->nb_rx_queues; q++)
1349fe91ade9SDanylo Vodopianov 			eth_rx_queue_stop(eth_dev, q);
1350fe91ade9SDanylo Vodopianov 
1351fe91ade9SDanylo Vodopianov 		for (q = 0; q < internals->nb_tx_queues; q++)
1352fe91ade9SDanylo Vodopianov 			eth_tx_queue_stop(eth_dev, q);
1353fe91ade9SDanylo Vodopianov 	}
1354fe91ade9SDanylo Vodopianov 
1355c93ef6edSSerhii Iliushyk 	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
1356c93ef6edSSerhii Iliushyk 	return 0;
1357c93ef6edSSerhii Iliushyk }
1358c93ef6edSSerhii Iliushyk 
13599147e9f9SSerhii Iliushyk static int
13609147e9f9SSerhii Iliushyk eth_dev_set_link_up(struct rte_eth_dev *eth_dev)
13619147e9f9SSerhii Iliushyk {
13629147e9f9SSerhii Iliushyk 	const struct port_ops *port_ops = get_port_ops();
13639147e9f9SSerhii Iliushyk 
13649147e9f9SSerhii Iliushyk 	if (port_ops == NULL) {
13653489b87bSDanylo Vodopianov 		NT_LOG(ERR, NTNIC, "Link management module uninitialized");
13669147e9f9SSerhii Iliushyk 		return -1;
13679147e9f9SSerhii Iliushyk 	}
13689147e9f9SSerhii Iliushyk 
1369e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *const internals = eth_dev->data->dev_private;
13709147e9f9SSerhii Iliushyk 
13719147e9f9SSerhii Iliushyk 	struct adapter_info_s *p_adapter_info = &internals->p_drv->ntdrv.adapter_info;
13729147e9f9SSerhii Iliushyk 	const int port = internals->n_intf_no;
13739147e9f9SSerhii Iliushyk 
13749147e9f9SSerhii Iliushyk 	if (internals->type == PORT_TYPE_VIRTUAL || internals->type == PORT_TYPE_OVERRIDE)
13759147e9f9SSerhii Iliushyk 		return 0;
13769147e9f9SSerhii Iliushyk 
13779147e9f9SSerhii Iliushyk 	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
13789147e9f9SSerhii Iliushyk 	assert(port == internals->n_intf_no);
13799147e9f9SSerhii Iliushyk 
13809147e9f9SSerhii Iliushyk 	port_ops->set_adm_state(p_adapter_info, port, true);
13819147e9f9SSerhii Iliushyk 
13829147e9f9SSerhii Iliushyk 	return 0;
13839147e9f9SSerhii Iliushyk }
13849147e9f9SSerhii Iliushyk 
13859147e9f9SSerhii Iliushyk static int
13869147e9f9SSerhii Iliushyk eth_dev_set_link_down(struct rte_eth_dev *eth_dev)
13879147e9f9SSerhii Iliushyk {
13889147e9f9SSerhii Iliushyk 	const struct port_ops *port_ops = get_port_ops();
13899147e9f9SSerhii Iliushyk 
13909147e9f9SSerhii Iliushyk 	if (port_ops == NULL) {
13913489b87bSDanylo Vodopianov 		NT_LOG(ERR, NTNIC, "Link management module uninitialized");
13929147e9f9SSerhii Iliushyk 		return -1;
13939147e9f9SSerhii Iliushyk 	}
13949147e9f9SSerhii Iliushyk 
1395e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *const internals = eth_dev->data->dev_private;
13969147e9f9SSerhii Iliushyk 
13979147e9f9SSerhii Iliushyk 	struct adapter_info_s *p_adapter_info = &internals->p_drv->ntdrv.adapter_info;
13989147e9f9SSerhii Iliushyk 	const int port = internals->n_intf_no;
13999147e9f9SSerhii Iliushyk 
14009147e9f9SSerhii Iliushyk 	if (internals->type == PORT_TYPE_VIRTUAL || internals->type == PORT_TYPE_OVERRIDE)
14019147e9f9SSerhii Iliushyk 		return 0;
14029147e9f9SSerhii Iliushyk 
14039147e9f9SSerhii Iliushyk 	assert(port >= 0 && port < NUM_ADAPTER_PORTS_MAX);
14049147e9f9SSerhii Iliushyk 	assert(port == internals->n_intf_no);
14059147e9f9SSerhii Iliushyk 
14069147e9f9SSerhii Iliushyk 	port_ops->set_link_status(p_adapter_info, port, false);
14079147e9f9SSerhii Iliushyk 
14089147e9f9SSerhii Iliushyk 	return 0;
14099147e9f9SSerhii Iliushyk }
14109147e9f9SSerhii Iliushyk 
1411c93ef6edSSerhii Iliushyk static void
1412c93ef6edSSerhii Iliushyk drv_deinit(struct drv_s *p_drv)
1413c93ef6edSSerhii Iliushyk {
141496c8249bSDanylo Vodopianov 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
141596c8249bSDanylo Vodopianov 
141696c8249bSDanylo Vodopianov 	if (profile_inline_ops == NULL) {
141796c8249bSDanylo Vodopianov 		NT_LOG_DBGX(ERR, NTNIC, "profile_inline module uninitialized");
141896c8249bSDanylo Vodopianov 		return;
141996c8249bSDanylo Vodopianov 	}
142096c8249bSDanylo Vodopianov 
1421d3dc3627SSerhii Iliushyk 	const struct adapter_ops *adapter_ops = get_adapter_ops();
1422d3dc3627SSerhii Iliushyk 
1423d3dc3627SSerhii Iliushyk 	if (adapter_ops == NULL) {
14243489b87bSDanylo Vodopianov 		NT_LOG(ERR, NTNIC, "Adapter module uninitialized");
1425d3dc3627SSerhii Iliushyk 		return;
1426d3dc3627SSerhii Iliushyk 	}
1427d3dc3627SSerhii Iliushyk 
1428c93ef6edSSerhii Iliushyk 	if (p_drv == NULL)
1429c93ef6edSSerhii Iliushyk 		return;
1430c93ef6edSSerhii Iliushyk 
1431d3dc3627SSerhii Iliushyk 	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
143296c8249bSDanylo Vodopianov 	fpga_info_t *fpga_info = &p_nt_drv->adapter_info.fpga_info;
143396c8249bSDanylo Vodopianov 
143496c8249bSDanylo Vodopianov 	/*
143596c8249bSDanylo Vodopianov 	 * Mark the global pdrv for cleared. Used by some threads to terminate.
143696c8249bSDanylo Vodopianov 	 * 1 second to give the threads a chance to see the termonation.
143796c8249bSDanylo Vodopianov 	 */
143896c8249bSDanylo Vodopianov 	clear_pdrv(p_drv);
143996c8249bSDanylo Vodopianov 	nt_os_wait_usec(1000000);
144096c8249bSDanylo Vodopianov 
144196c8249bSDanylo Vodopianov 	/* stop statistics threads */
144296c8249bSDanylo Vodopianov 	p_drv->ntdrv.b_shutdown = true;
1443a1ba8c47SDanylo Vodopianov 	THREAD_JOIN(p_nt_drv->stat_thread);
144496c8249bSDanylo Vodopianov 
144596c8249bSDanylo Vodopianov 	if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
144696c8249bSDanylo Vodopianov 		THREAD_JOIN(p_nt_drv->flm_thread);
144796c8249bSDanylo Vodopianov 		profile_inline_ops->flm_free_queues();
1448e7e49ce6SDanylo Vodopianov 		THREAD_JOIN(p_nt_drv->port_event_thread);
14494f0f5ab0SDanylo Vodopianov 		/* Free all local flm event queues */
14504f0f5ab0SDanylo Vodopianov 		flm_inf_sta_queue_free_all(FLM_INFO_LOCAL);
14514f0f5ab0SDanylo Vodopianov 		/* Free all remote flm event queues */
14524f0f5ab0SDanylo Vodopianov 		flm_inf_sta_queue_free_all(FLM_INFO_REMOTE);
14534f0f5ab0SDanylo Vodopianov 		/* Free all aged flow event queues */
14544f0f5ab0SDanylo Vodopianov 		flm_age_queue_free_all();
145596c8249bSDanylo Vodopianov 	}
1456d3dc3627SSerhii Iliushyk 
1457d3dc3627SSerhii Iliushyk 	/* stop adapter */
1458d3dc3627SSerhii Iliushyk 	adapter_ops->deinit(&p_nt_drv->adapter_info);
1459d3dc3627SSerhii Iliushyk 
1460c93ef6edSSerhii Iliushyk 	/* clean memory */
1461c93ef6edSSerhii Iliushyk 	rte_free(p_drv);
1462c93ef6edSSerhii Iliushyk 	p_drv = NULL;
1463c93ef6edSSerhii Iliushyk }
1464c93ef6edSSerhii Iliushyk 
1465c93ef6edSSerhii Iliushyk static int
1466c93ef6edSSerhii Iliushyk eth_dev_close(struct rte_eth_dev *eth_dev)
1467c93ef6edSSerhii Iliushyk {
1468e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *internals = eth_dev->data->dev_private;
1469c93ef6edSSerhii Iliushyk 	struct drv_s *p_drv = internals->p_drv;
1470c93ef6edSSerhii Iliushyk 
1471b0cd36e9SDanylo Vodopianov 	if (internals->type != PORT_TYPE_VIRTUAL) {
1472b0cd36e9SDanylo Vodopianov 		struct ntnic_rx_queue *rx_q = internals->rxq_scg;
1473b0cd36e9SDanylo Vodopianov 		struct ntnic_tx_queue *tx_q = internals->txq_scg;
1474b0cd36e9SDanylo Vodopianov 
1475b0cd36e9SDanylo Vodopianov 		uint q;
1476b0cd36e9SDanylo Vodopianov 
1477b0cd36e9SDanylo Vodopianov 		if (sg_ops != NULL) {
1478b0cd36e9SDanylo Vodopianov 			for (q = 0; q < internals->nb_rx_queues; q++)
1479b0cd36e9SDanylo Vodopianov 				sg_ops->nthw_release_mngd_rx_virt_queue(rx_q[q].vq);
1480b0cd36e9SDanylo Vodopianov 
1481b0cd36e9SDanylo Vodopianov 			for (q = 0; q < internals->nb_tx_queues; q++)
1482b0cd36e9SDanylo Vodopianov 				sg_ops->nthw_release_mngd_tx_virt_queue(tx_q[q].vq);
1483b0cd36e9SDanylo Vodopianov 		}
1484b0cd36e9SDanylo Vodopianov 	}
1485b0cd36e9SDanylo Vodopianov 
1486c93ef6edSSerhii Iliushyk 	internals->p_drv = NULL;
1487c93ef6edSSerhii Iliushyk 
14883de5fe79SDanylo Vodopianov 	if (p_drv) {
1489c93ef6edSSerhii Iliushyk 		/* decrease initialized ethernet devices */
1490c93ef6edSSerhii Iliushyk 		p_drv->n_eth_dev_init_count--;
1491c93ef6edSSerhii Iliushyk 
1492c93ef6edSSerhii Iliushyk 		/*
1493c93ef6edSSerhii Iliushyk 		 * rte_pci_dev has no private member for p_drv
1494c93ef6edSSerhii Iliushyk 		 * wait until all rte_eth_dev's are closed - then close adapters via p_drv
1495c93ef6edSSerhii Iliushyk 		 */
14963de5fe79SDanylo Vodopianov 		if (!p_drv->n_eth_dev_init_count)
1497c93ef6edSSerhii Iliushyk 			drv_deinit(p_drv);
14983de5fe79SDanylo Vodopianov 	}
1499c93ef6edSSerhii Iliushyk 
1500c93ef6edSSerhii Iliushyk 	return 0;
1501c93ef6edSSerhii Iliushyk }
1502c93ef6edSSerhii Iliushyk 
1503ddf184d0SSerhii Iliushyk static int
1504ddf184d0SSerhii Iliushyk eth_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version, size_t fw_size)
1505ddf184d0SSerhii Iliushyk {
1506e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *internals = eth_dev->data->dev_private;
1507ddf184d0SSerhii Iliushyk 
15089147e9f9SSerhii Iliushyk 	if (internals->type == PORT_TYPE_VIRTUAL || internals->type == PORT_TYPE_OVERRIDE)
15099147e9f9SSerhii Iliushyk 		return 0;
15109147e9f9SSerhii Iliushyk 
1511ddf184d0SSerhii Iliushyk 	fpga_info_t *fpga_info = &internals->p_drv->ntdrv.adapter_info.fpga_info;
1512ddf184d0SSerhii Iliushyk 	const int length = snprintf(fw_version, fw_size, "%03d-%04d-%02d-%02d",
1513ddf184d0SSerhii Iliushyk 			fpga_info->n_fpga_type_id, fpga_info->n_fpga_prod_id,
1514ddf184d0SSerhii Iliushyk 			fpga_info->n_fpga_ver_id, fpga_info->n_fpga_rev_id);
1515ddf184d0SSerhii Iliushyk 
1516ddf184d0SSerhii Iliushyk 	if ((size_t)length < fw_size) {
1517ddf184d0SSerhii Iliushyk 		/* We have space for the version string */
1518ddf184d0SSerhii Iliushyk 		return 0;
1519ddf184d0SSerhii Iliushyk 
1520ddf184d0SSerhii Iliushyk 	} else {
1521ddf184d0SSerhii Iliushyk 		/* We do not have space for the version string -return the needed space */
1522ddf184d0SSerhii Iliushyk 		return length + 1;
1523ddf184d0SSerhii Iliushyk 	}
1524ddf184d0SSerhii Iliushyk }
1525ddf184d0SSerhii Iliushyk 
1526ed01e436SSerhii Iliushyk static int dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused, const struct rte_flow_ops **ops)
1527ed01e436SSerhii Iliushyk {
1528ed01e436SSerhii Iliushyk 	*ops = get_dev_flow_ops();
1529ed01e436SSerhii Iliushyk 	return 0;
1530ed01e436SSerhii Iliushyk }
1531ed01e436SSerhii Iliushyk 
1532cf6007eaSDanylo Vodopianov static int eth_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *stats, unsigned int n)
1533cf6007eaSDanylo Vodopianov {
1534e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *internals = eth_dev->data->dev_private;
1535cf6007eaSDanylo Vodopianov 	struct drv_s *p_drv = internals->p_drv;
1536cf6007eaSDanylo Vodopianov 	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
1537cf6007eaSDanylo Vodopianov 	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
1538cf6007eaSDanylo Vodopianov 	int if_index = internals->n_intf_no;
1539cf6007eaSDanylo Vodopianov 	int nb_xstats;
1540cf6007eaSDanylo Vodopianov 
1541cf6007eaSDanylo Vodopianov 	const struct ntnic_xstats_ops *ntnic_xstats_ops = get_ntnic_xstats_ops();
1542cf6007eaSDanylo Vodopianov 
1543cf6007eaSDanylo Vodopianov 	if (ntnic_xstats_ops == NULL) {
1544cf6007eaSDanylo Vodopianov 		NT_LOG(INF, NTNIC, "ntnic_xstats module not included");
1545cf6007eaSDanylo Vodopianov 		return -1;
1546cf6007eaSDanylo Vodopianov 	}
1547cf6007eaSDanylo Vodopianov 
15482407c755SSerhii Iliushyk 	rte_spinlock_lock(&p_nt_drv->stat_lck);
1549cf6007eaSDanylo Vodopianov 	nb_xstats = ntnic_xstats_ops->nthw_xstats_get(p_nt4ga_stat, stats, n, if_index);
15502407c755SSerhii Iliushyk 	rte_spinlock_unlock(&p_nt_drv->stat_lck);
1551cf6007eaSDanylo Vodopianov 	return nb_xstats;
1552cf6007eaSDanylo Vodopianov }
1553cf6007eaSDanylo Vodopianov 
1554cf6007eaSDanylo Vodopianov static int eth_xstats_get_by_id(struct rte_eth_dev *eth_dev,
1555cf6007eaSDanylo Vodopianov 	const uint64_t *ids,
1556cf6007eaSDanylo Vodopianov 	uint64_t *values,
1557cf6007eaSDanylo Vodopianov 	unsigned int n)
1558cf6007eaSDanylo Vodopianov {
1559e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *internals = eth_dev->data->dev_private;
1560cf6007eaSDanylo Vodopianov 	struct drv_s *p_drv = internals->p_drv;
1561cf6007eaSDanylo Vodopianov 	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
1562cf6007eaSDanylo Vodopianov 	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
1563cf6007eaSDanylo Vodopianov 	int if_index = internals->n_intf_no;
1564cf6007eaSDanylo Vodopianov 	int nb_xstats;
1565cf6007eaSDanylo Vodopianov 
1566cf6007eaSDanylo Vodopianov 	const struct ntnic_xstats_ops *ntnic_xstats_ops = get_ntnic_xstats_ops();
1567cf6007eaSDanylo Vodopianov 
1568cf6007eaSDanylo Vodopianov 	if (ntnic_xstats_ops == NULL) {
1569cf6007eaSDanylo Vodopianov 		NT_LOG(INF, NTNIC, "ntnic_xstats module not included");
1570cf6007eaSDanylo Vodopianov 		return -1;
1571cf6007eaSDanylo Vodopianov 	}
1572cf6007eaSDanylo Vodopianov 
15732407c755SSerhii Iliushyk 	rte_spinlock_lock(&p_nt_drv->stat_lck);
1574cf6007eaSDanylo Vodopianov 	nb_xstats =
1575cf6007eaSDanylo Vodopianov 		ntnic_xstats_ops->nthw_xstats_get_by_id(p_nt4ga_stat, ids, values, n, if_index);
15762407c755SSerhii Iliushyk 	rte_spinlock_unlock(&p_nt_drv->stat_lck);
1577cf6007eaSDanylo Vodopianov 	return nb_xstats;
1578cf6007eaSDanylo Vodopianov }
1579cf6007eaSDanylo Vodopianov 
1580cf6007eaSDanylo Vodopianov static int eth_xstats_reset(struct rte_eth_dev *eth_dev)
1581cf6007eaSDanylo Vodopianov {
1582e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *internals = eth_dev->data->dev_private;
1583cf6007eaSDanylo Vodopianov 	struct drv_s *p_drv = internals->p_drv;
1584cf6007eaSDanylo Vodopianov 	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
1585cf6007eaSDanylo Vodopianov 	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
1586cf6007eaSDanylo Vodopianov 	int if_index = internals->n_intf_no;
1587cf6007eaSDanylo Vodopianov 
1588cf6007eaSDanylo Vodopianov 	struct ntnic_xstats_ops *ntnic_xstats_ops = get_ntnic_xstats_ops();
1589cf6007eaSDanylo Vodopianov 
1590cf6007eaSDanylo Vodopianov 	if (ntnic_xstats_ops == NULL) {
1591cf6007eaSDanylo Vodopianov 		NT_LOG(INF, NTNIC, "ntnic_xstats module not included");
1592cf6007eaSDanylo Vodopianov 		return -1;
1593cf6007eaSDanylo Vodopianov 	}
1594cf6007eaSDanylo Vodopianov 
15952407c755SSerhii Iliushyk 	rte_spinlock_lock(&p_nt_drv->stat_lck);
1596cf6007eaSDanylo Vodopianov 	ntnic_xstats_ops->nthw_xstats_reset(p_nt4ga_stat, if_index);
15972407c755SSerhii Iliushyk 	rte_spinlock_unlock(&p_nt_drv->stat_lck);
1598cf6007eaSDanylo Vodopianov 	return dpdk_stats_reset(internals, p_nt_drv, if_index);
1599cf6007eaSDanylo Vodopianov }
1600cf6007eaSDanylo Vodopianov 
1601cf6007eaSDanylo Vodopianov static int eth_xstats_get_names(struct rte_eth_dev *eth_dev,
1602cf6007eaSDanylo Vodopianov 	struct rte_eth_xstat_name *xstats_names, unsigned int size)
1603cf6007eaSDanylo Vodopianov {
1604e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *internals = eth_dev->data->dev_private;
1605cf6007eaSDanylo Vodopianov 	struct drv_s *p_drv = internals->p_drv;
1606cf6007eaSDanylo Vodopianov 	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
1607cf6007eaSDanylo Vodopianov 	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
1608cf6007eaSDanylo Vodopianov 
1609cf6007eaSDanylo Vodopianov 	const struct ntnic_xstats_ops *ntnic_xstats_ops = get_ntnic_xstats_ops();
1610cf6007eaSDanylo Vodopianov 
1611cf6007eaSDanylo Vodopianov 	if (ntnic_xstats_ops == NULL) {
1612cf6007eaSDanylo Vodopianov 		NT_LOG(INF, NTNIC, "ntnic_xstats module not included");
1613cf6007eaSDanylo Vodopianov 		return -1;
1614cf6007eaSDanylo Vodopianov 	}
1615cf6007eaSDanylo Vodopianov 
1616cf6007eaSDanylo Vodopianov 	return ntnic_xstats_ops->nthw_xstats_get_names(p_nt4ga_stat, xstats_names, size);
1617cf6007eaSDanylo Vodopianov }
1618cf6007eaSDanylo Vodopianov 
1619cf6007eaSDanylo Vodopianov static int eth_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
1620cf6007eaSDanylo Vodopianov 	const uint64_t *ids,
1621cf6007eaSDanylo Vodopianov 	struct rte_eth_xstat_name *xstats_names,
1622cf6007eaSDanylo Vodopianov 	unsigned int size)
1623cf6007eaSDanylo Vodopianov {
1624e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *internals = eth_dev->data->dev_private;
1625cf6007eaSDanylo Vodopianov 	struct drv_s *p_drv = internals->p_drv;
1626cf6007eaSDanylo Vodopianov 	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
1627cf6007eaSDanylo Vodopianov 	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
1628cf6007eaSDanylo Vodopianov 	const struct ntnic_xstats_ops *ntnic_xstats_ops = get_ntnic_xstats_ops();
1629cf6007eaSDanylo Vodopianov 
1630cf6007eaSDanylo Vodopianov 	if (ntnic_xstats_ops == NULL) {
1631cf6007eaSDanylo Vodopianov 		NT_LOG(INF, NTNIC, "ntnic_xstats module not included");
1632cf6007eaSDanylo Vodopianov 		return -1;
1633cf6007eaSDanylo Vodopianov 	}
1634cf6007eaSDanylo Vodopianov 
1635cf6007eaSDanylo Vodopianov 	return ntnic_xstats_ops->nthw_xstats_get_names_by_id(p_nt4ga_stat, xstats_names, ids,
1636cf6007eaSDanylo Vodopianov 			size);
1637cf6007eaSDanylo Vodopianov }
1638cf6007eaSDanylo Vodopianov 
16399147e9f9SSerhii Iliushyk static int
16409147e9f9SSerhii Iliushyk promiscuous_enable(struct rte_eth_dev __rte_unused(*dev))
16419147e9f9SSerhii Iliushyk {
16423489b87bSDanylo Vodopianov 	NT_LOG(DBG, NTHW, "The device always run promiscuous mode");
16439147e9f9SSerhii Iliushyk 	return 0;
16449147e9f9SSerhii Iliushyk }
16459147e9f9SSerhii Iliushyk 
16468eed292bSSerhii Iliushyk static int eth_dev_rss_hash_update(struct rte_eth_dev *eth_dev, struct rte_eth_rss_conf *rss_conf)
16478eed292bSSerhii Iliushyk {
16488eed292bSSerhii Iliushyk 	const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops();
16498eed292bSSerhii Iliushyk 
16508eed292bSSerhii Iliushyk 	if (flow_filter_ops == NULL) {
16518eed292bSSerhii Iliushyk 		NT_LOG_DBGX(ERR, NTNIC, "flow_filter module uninitialized");
16528eed292bSSerhii Iliushyk 		return -1;
16538eed292bSSerhii Iliushyk 	}
16548eed292bSSerhii Iliushyk 
1655e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *internals = eth_dev->data->dev_private;
16568eed292bSSerhii Iliushyk 
16578eed292bSSerhii Iliushyk 	struct flow_nic_dev *ndev = internals->flw_dev->ndev;
16588eed292bSSerhii Iliushyk 	struct nt_eth_rss_conf tmp_rss_conf = { 0 };
16598eed292bSSerhii Iliushyk 	const int hsh_idx = 0;	/* hsh index 0 means the default receipt in HSH module */
16608eed292bSSerhii Iliushyk 
16618eed292bSSerhii Iliushyk 	if (rss_conf->rss_key != NULL) {
16628eed292bSSerhii Iliushyk 		if (rss_conf->rss_key_len > MAX_RSS_KEY_LEN) {
16638eed292bSSerhii Iliushyk 			NT_LOG(ERR, NTNIC,
16648eed292bSSerhii Iliushyk 				"ERROR: - RSS hash key length %u exceeds maximum value %u",
16658eed292bSSerhii Iliushyk 				rss_conf->rss_key_len, MAX_RSS_KEY_LEN);
16668eed292bSSerhii Iliushyk 			return -1;
16678eed292bSSerhii Iliushyk 		}
16688eed292bSSerhii Iliushyk 
16698eed292bSSerhii Iliushyk 		rte_memcpy(&tmp_rss_conf.rss_key, rss_conf->rss_key, rss_conf->rss_key_len);
16708eed292bSSerhii Iliushyk 	}
16718eed292bSSerhii Iliushyk 
16728eed292bSSerhii Iliushyk 	tmp_rss_conf.algorithm = rss_conf->algorithm;
16738eed292bSSerhii Iliushyk 
16748eed292bSSerhii Iliushyk 	tmp_rss_conf.rss_hf = rss_conf->rss_hf;
16758eed292bSSerhii Iliushyk 	int res = flow_filter_ops->flow_nic_set_hasher_fields(ndev, hsh_idx, tmp_rss_conf);
16768eed292bSSerhii Iliushyk 
16778eed292bSSerhii Iliushyk 	if (res == 0) {
16788eed292bSSerhii Iliushyk 		flow_filter_ops->hw_mod_hsh_rcp_flush(&ndev->be, hsh_idx, 1);
16798eed292bSSerhii Iliushyk 		rte_memcpy(&ndev->rss_conf, &tmp_rss_conf, sizeof(struct nt_eth_rss_conf));
16808eed292bSSerhii Iliushyk 
16818eed292bSSerhii Iliushyk 	} else {
16828eed292bSSerhii Iliushyk 		NT_LOG(ERR, NTNIC, "ERROR: - RSS hash update failed with error %i", res);
16838eed292bSSerhii Iliushyk 	}
16848eed292bSSerhii Iliushyk 
16858eed292bSSerhii Iliushyk 	return res;
16868eed292bSSerhii Iliushyk }
16878eed292bSSerhii Iliushyk 
16888eed292bSSerhii Iliushyk static int rss_hash_conf_get(struct rte_eth_dev *eth_dev, struct rte_eth_rss_conf *rss_conf)
16898eed292bSSerhii Iliushyk {
1690e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *internals = eth_dev->data->dev_private;
16918eed292bSSerhii Iliushyk 	struct flow_nic_dev *ndev = internals->flw_dev->ndev;
16928eed292bSSerhii Iliushyk 
16938eed292bSSerhii Iliushyk 	rss_conf->algorithm = (enum rte_eth_hash_function)ndev->rss_conf.algorithm;
16948eed292bSSerhii Iliushyk 
16958eed292bSSerhii Iliushyk 	rss_conf->rss_hf = ndev->rss_conf.rss_hf;
16968eed292bSSerhii Iliushyk 
16978eed292bSSerhii Iliushyk 	/*
16988eed292bSSerhii Iliushyk 	 * copy full stored key into rss_key and pad it with
16998eed292bSSerhii Iliushyk 	 * zeros up to rss_key_len / MAX_RSS_KEY_LEN
17008eed292bSSerhii Iliushyk 	 */
17018eed292bSSerhii Iliushyk 	if (rss_conf->rss_key != NULL) {
17028eed292bSSerhii Iliushyk 		int key_len = RTE_MIN(rss_conf->rss_key_len, MAX_RSS_KEY_LEN);
17038eed292bSSerhii Iliushyk 		memset(rss_conf->rss_key, 0, rss_conf->rss_key_len);
17048eed292bSSerhii Iliushyk 		rte_memcpy(rss_conf->rss_key, &ndev->rss_conf.rss_key, key_len);
17058eed292bSSerhii Iliushyk 		rss_conf->rss_key_len = key_len;
17068eed292bSSerhii Iliushyk 	}
17078eed292bSSerhii Iliushyk 
17088eed292bSSerhii Iliushyk 	return 0;
17098eed292bSSerhii Iliushyk }
17108eed292bSSerhii Iliushyk 
1711c35c06fbSDanylo Vodopianov static struct eth_dev_ops nthw_eth_dev_ops = {
1712c93ef6edSSerhii Iliushyk 	.dev_configure = eth_dev_configure,
1713c93ef6edSSerhii Iliushyk 	.dev_start = eth_dev_start,
1714c93ef6edSSerhii Iliushyk 	.dev_stop = eth_dev_stop,
17159147e9f9SSerhii Iliushyk 	.dev_set_link_up = eth_dev_set_link_up,
17169147e9f9SSerhii Iliushyk 	.dev_set_link_down = eth_dev_set_link_down,
1717c93ef6edSSerhii Iliushyk 	.dev_close = eth_dev_close,
17189147e9f9SSerhii Iliushyk 	.link_update = eth_link_update,
1719effa0469SDanylo Vodopianov 	.stats_get = eth_stats_get,
1720effa0469SDanylo Vodopianov 	.stats_reset = eth_stats_reset,
1721c93ef6edSSerhii Iliushyk 	.dev_infos_get = eth_dev_infos_get,
1722ddf184d0SSerhii Iliushyk 	.fw_version_get = eth_fw_version_get,
17236b0047faSDanylo Vodopianov 	.rx_queue_setup = eth_rx_scg_queue_setup,
1724fe91ade9SDanylo Vodopianov 	.rx_queue_start = eth_rx_queue_start,
1725fe91ade9SDanylo Vodopianov 	.rx_queue_stop = eth_rx_queue_stop,
1726fe91ade9SDanylo Vodopianov 	.rx_queue_release = eth_rx_queue_release,
17276b0047faSDanylo Vodopianov 	.tx_queue_setup = eth_tx_scg_queue_setup,
1728fe91ade9SDanylo Vodopianov 	.tx_queue_start = eth_tx_queue_start,
1729fe91ade9SDanylo Vodopianov 	.tx_queue_stop = eth_tx_queue_stop,
1730fe91ade9SDanylo Vodopianov 	.tx_queue_release = eth_tx_queue_release,
17319147e9f9SSerhii Iliushyk 	.mac_addr_add = eth_mac_addr_add,
17329147e9f9SSerhii Iliushyk 	.mac_addr_set = eth_mac_addr_set,
17339147e9f9SSerhii Iliushyk 	.set_mc_addr_list = eth_set_mc_addr_list,
1734c35c06fbSDanylo Vodopianov 	.mtr_ops_get = NULL,
1735ed01e436SSerhii Iliushyk 	.flow_ops_get = dev_flow_ops_get,
1736cf6007eaSDanylo Vodopianov 	.xstats_get = eth_xstats_get,
1737cf6007eaSDanylo Vodopianov 	.xstats_get_names = eth_xstats_get_names,
1738cf6007eaSDanylo Vodopianov 	.xstats_reset = eth_xstats_reset,
1739cf6007eaSDanylo Vodopianov 	.xstats_get_by_id = eth_xstats_get_by_id,
1740cf6007eaSDanylo Vodopianov 	.xstats_get_names_by_id = eth_xstats_get_names_by_id,
1741*6019656dSOleksandr Kolomeiets 	.mtu_set = NULL,
17429147e9f9SSerhii Iliushyk 	.promiscuous_enable = promiscuous_enable,
17438eed292bSSerhii Iliushyk 	.rss_hash_update = eth_dev_rss_hash_update,
17448eed292bSSerhii Iliushyk 	.rss_hash_conf_get = rss_hash_conf_get,
1745c93ef6edSSerhii Iliushyk };
1746c93ef6edSSerhii Iliushyk 
174796c8249bSDanylo Vodopianov /*
1748e7e49ce6SDanylo Vodopianov  * Port event thread
1749e7e49ce6SDanylo Vodopianov  */
1750e7e49ce6SDanylo Vodopianov THREAD_FUNC port_event_thread_fn(void *context)
1751e7e49ce6SDanylo Vodopianov {
1752e0d9b3cdSSerhii Iliushyk 	struct pmd_internals *internals = context;
1753e7e49ce6SDanylo Vodopianov 	struct drv_s *p_drv = internals->p_drv;
1754e7e49ce6SDanylo Vodopianov 	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
1755e7e49ce6SDanylo Vodopianov 	struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
1756e7e49ce6SDanylo Vodopianov 	struct flow_nic_dev *ndev = p_adapter_info->nt4ga_filter.mp_flow_device;
1757e7e49ce6SDanylo Vodopianov 
1758e7e49ce6SDanylo Vodopianov 	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
1759e7e49ce6SDanylo Vodopianov 	struct rte_eth_dev *eth_dev = &rte_eth_devices[internals->port_id];
1760e7e49ce6SDanylo Vodopianov 	uint8_t port_no = internals->port;
1761e7e49ce6SDanylo Vodopianov 
1762e7e49ce6SDanylo Vodopianov 	ntnic_flm_load_t flmdata;
1763e7e49ce6SDanylo Vodopianov 	ntnic_port_load_t portdata;
1764e7e49ce6SDanylo Vodopianov 
1765e7e49ce6SDanylo Vodopianov 	memset(&flmdata, 0, sizeof(flmdata));
1766e7e49ce6SDanylo Vodopianov 	memset(&portdata, 0, sizeof(portdata));
1767e7e49ce6SDanylo Vodopianov 
1768e7e49ce6SDanylo Vodopianov 	while (ndev != NULL && ndev->eth_base == NULL)
1769e7e49ce6SDanylo Vodopianov 		nt_os_wait_usec(1 * 1000 * 1000);
1770e7e49ce6SDanylo Vodopianov 
1771e7e49ce6SDanylo Vodopianov 	while (!p_drv->ntdrv.b_shutdown) {
1772e7e49ce6SDanylo Vodopianov 		/*
1773e7e49ce6SDanylo Vodopianov 		 * FLM load measurement
1774e7e49ce6SDanylo Vodopianov 		 * Do only send event, if there has been a change
1775e7e49ce6SDanylo Vodopianov 		 */
1776e7e49ce6SDanylo Vodopianov 		if (p_nt4ga_stat->flm_stat_ver > 22 && p_nt4ga_stat->mp_stat_structs_flm) {
1777e7e49ce6SDanylo Vodopianov 			if (flmdata.lookup != p_nt4ga_stat->mp_stat_structs_flm->load_lps ||
1778e7e49ce6SDanylo Vodopianov 				flmdata.access != p_nt4ga_stat->mp_stat_structs_flm->load_aps) {
17792407c755SSerhii Iliushyk 				rte_spinlock_lock(&p_nt_drv->stat_lck);
1780e7e49ce6SDanylo Vodopianov 				flmdata.lookup = p_nt4ga_stat->mp_stat_structs_flm->load_lps;
1781e7e49ce6SDanylo Vodopianov 				flmdata.access = p_nt4ga_stat->mp_stat_structs_flm->load_aps;
1782e7e49ce6SDanylo Vodopianov 				flmdata.lookup_maximum =
1783e7e49ce6SDanylo Vodopianov 					p_nt4ga_stat->mp_stat_structs_flm->max_lps;
1784e7e49ce6SDanylo Vodopianov 				flmdata.access_maximum =
1785e7e49ce6SDanylo Vodopianov 					p_nt4ga_stat->mp_stat_structs_flm->max_aps;
17862407c755SSerhii Iliushyk 				rte_spinlock_unlock(&p_nt_drv->stat_lck);
1787e7e49ce6SDanylo Vodopianov 
1788e7e49ce6SDanylo Vodopianov 				if (eth_dev && eth_dev->data && eth_dev->data->dev_private) {
1789e7e49ce6SDanylo Vodopianov 					rte_eth_dev_callback_process(eth_dev,
1790e7e49ce6SDanylo Vodopianov 						(enum rte_eth_event_type)RTE_NTNIC_FLM_LOAD_EVENT,
1791e7e49ce6SDanylo Vodopianov 						&flmdata);
1792e7e49ce6SDanylo Vodopianov 				}
1793e7e49ce6SDanylo Vodopianov 			}
1794e7e49ce6SDanylo Vodopianov 		}
1795e7e49ce6SDanylo Vodopianov 
1796e7e49ce6SDanylo Vodopianov 		/*
1797e7e49ce6SDanylo Vodopianov 		 * Port load measurement
1798e7e49ce6SDanylo Vodopianov 		 * Do only send event, if there has been a change.
1799e7e49ce6SDanylo Vodopianov 		 */
1800e7e49ce6SDanylo Vodopianov 		if (p_nt4ga_stat->mp_port_load) {
1801e7e49ce6SDanylo Vodopianov 			if (portdata.rx_bps != p_nt4ga_stat->mp_port_load[port_no].rx_bps ||
1802e7e49ce6SDanylo Vodopianov 				portdata.tx_bps != p_nt4ga_stat->mp_port_load[port_no].tx_bps) {
18032407c755SSerhii Iliushyk 				rte_spinlock_lock(&p_nt_drv->stat_lck);
1804e7e49ce6SDanylo Vodopianov 				portdata.rx_bps = p_nt4ga_stat->mp_port_load[port_no].rx_bps;
1805e7e49ce6SDanylo Vodopianov 				portdata.tx_bps = p_nt4ga_stat->mp_port_load[port_no].tx_bps;
1806e7e49ce6SDanylo Vodopianov 				portdata.rx_pps = p_nt4ga_stat->mp_port_load[port_no].rx_pps;
1807e7e49ce6SDanylo Vodopianov 				portdata.tx_pps = p_nt4ga_stat->mp_port_load[port_no].tx_pps;
1808e7e49ce6SDanylo Vodopianov 				portdata.rx_pps_maximum =
1809e7e49ce6SDanylo Vodopianov 					p_nt4ga_stat->mp_port_load[port_no].rx_pps_max;
1810e7e49ce6SDanylo Vodopianov 				portdata.tx_pps_maximum =
1811e7e49ce6SDanylo Vodopianov 					p_nt4ga_stat->mp_port_load[port_no].tx_pps_max;
1812e7e49ce6SDanylo Vodopianov 				portdata.rx_bps_maximum =
1813e7e49ce6SDanylo Vodopianov 					p_nt4ga_stat->mp_port_load[port_no].rx_bps_max;
1814e7e49ce6SDanylo Vodopianov 				portdata.tx_bps_maximum =
1815e7e49ce6SDanylo Vodopianov 					p_nt4ga_stat->mp_port_load[port_no].tx_bps_max;
18162407c755SSerhii Iliushyk 				rte_spinlock_unlock(&p_nt_drv->stat_lck);
1817e7e49ce6SDanylo Vodopianov 
1818e7e49ce6SDanylo Vodopianov 				if (eth_dev && eth_dev->data && eth_dev->data->dev_private) {
1819e7e49ce6SDanylo Vodopianov 					rte_eth_dev_callback_process(eth_dev,
1820e7e49ce6SDanylo Vodopianov 						(enum rte_eth_event_type)RTE_NTNIC_PORT_LOAD_EVENT,
1821e7e49ce6SDanylo Vodopianov 						&portdata);
1822e7e49ce6SDanylo Vodopianov 				}
1823e7e49ce6SDanylo Vodopianov 			}
1824e7e49ce6SDanylo Vodopianov 		}
1825e7e49ce6SDanylo Vodopianov 
1826e7e49ce6SDanylo Vodopianov 		/* Process events */
1827e7e49ce6SDanylo Vodopianov 		{
1828e7e49ce6SDanylo Vodopianov 			int count = 0;
1829e7e49ce6SDanylo Vodopianov 			bool do_wait = true;
1830e7e49ce6SDanylo Vodopianov 
1831e7e49ce6SDanylo Vodopianov 			while (count < 5000) {
1832e7e49ce6SDanylo Vodopianov 				/* Local FLM statistic events */
1833e7e49ce6SDanylo Vodopianov 				struct flm_info_event_s data;
1834e7e49ce6SDanylo Vodopianov 
1835e7e49ce6SDanylo Vodopianov 				if (flm_inf_queue_get(port_no, FLM_INFO_LOCAL, &data) == 0) {
1836e7e49ce6SDanylo Vodopianov 					if (eth_dev && eth_dev->data &&
1837e7e49ce6SDanylo Vodopianov 						eth_dev->data->dev_private) {
1838e7e49ce6SDanylo Vodopianov 						struct ntnic_flm_statistic_s event_data;
1839e7e49ce6SDanylo Vodopianov 						event_data.bytes = data.bytes;
1840e7e49ce6SDanylo Vodopianov 						event_data.packets = data.packets;
1841e7e49ce6SDanylo Vodopianov 						event_data.cause = data.cause;
1842e7e49ce6SDanylo Vodopianov 						event_data.id = data.id;
1843e7e49ce6SDanylo Vodopianov 						event_data.timestamp = data.timestamp;
1844e7e49ce6SDanylo Vodopianov 						rte_eth_dev_callback_process(eth_dev,
1845e7e49ce6SDanylo Vodopianov 							(enum rte_eth_event_type)
1846e7e49ce6SDanylo Vodopianov 							RTE_NTNIC_FLM_STATS_EVENT,
1847e7e49ce6SDanylo Vodopianov 							&event_data);
1848e7e49ce6SDanylo Vodopianov 						do_wait = false;
1849e7e49ce6SDanylo Vodopianov 					}
1850e7e49ce6SDanylo Vodopianov 				}
1851e7e49ce6SDanylo Vodopianov 
1852c0d44442SDanylo Vodopianov 				/* AGED event */
1853c0d44442SDanylo Vodopianov 				/* Note: RTE_FLOW_PORT_FLAG_STRICT_QUEUE flag is not supported so
1854c0d44442SDanylo Vodopianov 				 * event is always generated
1855c0d44442SDanylo Vodopianov 				 */
1856c0d44442SDanylo Vodopianov 				int aged_event_count = flm_age_event_get(port_no);
1857c0d44442SDanylo Vodopianov 
1858c0d44442SDanylo Vodopianov 				if (aged_event_count > 0 && eth_dev && eth_dev->data &&
1859c0d44442SDanylo Vodopianov 					eth_dev->data->dev_private) {
1860c0d44442SDanylo Vodopianov 					rte_eth_dev_callback_process(eth_dev,
1861c0d44442SDanylo Vodopianov 						RTE_ETH_EVENT_FLOW_AGED,
1862c0d44442SDanylo Vodopianov 						NULL);
1863c0d44442SDanylo Vodopianov 					flm_age_event_clear(port_no);
1864c0d44442SDanylo Vodopianov 					do_wait = false;
1865c0d44442SDanylo Vodopianov 				}
1866c0d44442SDanylo Vodopianov 
1867e7e49ce6SDanylo Vodopianov 				if (do_wait)
1868e7e49ce6SDanylo Vodopianov 					nt_os_wait_usec(10);
1869e7e49ce6SDanylo Vodopianov 
1870e7e49ce6SDanylo Vodopianov 				count++;
1871e7e49ce6SDanylo Vodopianov 				do_wait = true;
1872e7e49ce6SDanylo Vodopianov 			}
1873e7e49ce6SDanylo Vodopianov 		}
1874e7e49ce6SDanylo Vodopianov 	}
1875e7e49ce6SDanylo Vodopianov 
1876e7e49ce6SDanylo Vodopianov 	return THREAD_RETURN;
1877e7e49ce6SDanylo Vodopianov }
1878e7e49ce6SDanylo Vodopianov 
1879e7e49ce6SDanylo Vodopianov /*
188096c8249bSDanylo Vodopianov  * Adapter flm stat thread
188196c8249bSDanylo Vodopianov  */
188296c8249bSDanylo Vodopianov THREAD_FUNC adapter_flm_update_thread_fn(void *context)
188396c8249bSDanylo Vodopianov {
188496c8249bSDanylo Vodopianov 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
188596c8249bSDanylo Vodopianov 
188696c8249bSDanylo Vodopianov 	if (profile_inline_ops == NULL) {
188796c8249bSDanylo Vodopianov 		NT_LOG(ERR, NTNIC, "%s: profile_inline module uninitialized", __func__);
188896c8249bSDanylo Vodopianov 		return THREAD_RETURN;
188996c8249bSDanylo Vodopianov 	}
189096c8249bSDanylo Vodopianov 
189196c8249bSDanylo Vodopianov 	struct drv_s *p_drv = context;
189296c8249bSDanylo Vodopianov 
189396c8249bSDanylo Vodopianov 	struct ntdrv_4ga_s *p_nt_drv = &p_drv->ntdrv;
189496c8249bSDanylo Vodopianov 	struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
189596c8249bSDanylo Vodopianov 	struct nt4ga_filter_s *p_nt4ga_filter = &p_adapter_info->nt4ga_filter;
189696c8249bSDanylo Vodopianov 	struct flow_nic_dev *p_flow_nic_dev = p_nt4ga_filter->mp_flow_device;
189796c8249bSDanylo Vodopianov 
189896c8249bSDanylo Vodopianov 	NT_LOG(DBG, NTNIC, "%s: %s: waiting for port configuration",
189996c8249bSDanylo Vodopianov 		p_adapter_info->mp_adapter_id_str, __func__);
190096c8249bSDanylo Vodopianov 
190196c8249bSDanylo Vodopianov 	while (p_flow_nic_dev->eth_base == NULL)
190296c8249bSDanylo Vodopianov 		nt_os_wait_usec(1 * 1000 * 1000);
190396c8249bSDanylo Vodopianov 
190496c8249bSDanylo Vodopianov 	struct flow_eth_dev *dev = p_flow_nic_dev->eth_base;
190596c8249bSDanylo Vodopianov 
190696c8249bSDanylo Vodopianov 	NT_LOG(DBG, NTNIC, "%s: %s: begin", p_adapter_info->mp_adapter_id_str, __func__);
190796c8249bSDanylo Vodopianov 
190896c8249bSDanylo Vodopianov 	while (!p_drv->ntdrv.b_shutdown)
190996c8249bSDanylo Vodopianov 		if (profile_inline_ops->flm_update(dev) == 0)
191096c8249bSDanylo Vodopianov 			nt_os_wait_usec(10);
191196c8249bSDanylo Vodopianov 
191296c8249bSDanylo Vodopianov 	NT_LOG(DBG, NTNIC, "%s: %s: end", p_adapter_info->mp_adapter_id_str, __func__);
191396c8249bSDanylo Vodopianov 	return THREAD_RETURN;
191496c8249bSDanylo Vodopianov }
191596c8249bSDanylo Vodopianov 
1916a1ba8c47SDanylo Vodopianov /*
1917a1ba8c47SDanylo Vodopianov  * Adapter stat thread
1918a1ba8c47SDanylo Vodopianov  */
1919a1ba8c47SDanylo Vodopianov THREAD_FUNC adapter_stat_thread_fn(void *context)
1920a1ba8c47SDanylo Vodopianov {
1921a1ba8c47SDanylo Vodopianov 	const struct nt4ga_stat_ops *nt4ga_stat_ops = get_nt4ga_stat_ops();
1922a1ba8c47SDanylo Vodopianov 
1923a1ba8c47SDanylo Vodopianov 	if (nt4ga_stat_ops == NULL) {
1924a1ba8c47SDanylo Vodopianov 		NT_LOG_DBGX(ERR, NTNIC, "Statistics module uninitialized");
1925a1ba8c47SDanylo Vodopianov 		return THREAD_RETURN;
1926a1ba8c47SDanylo Vodopianov 	}
1927a1ba8c47SDanylo Vodopianov 
1928a1ba8c47SDanylo Vodopianov 	struct drv_s *p_drv = context;
1929a1ba8c47SDanylo Vodopianov 
1930a1ba8c47SDanylo Vodopianov 	ntdrv_4ga_t *p_nt_drv = &p_drv->ntdrv;
1931a1ba8c47SDanylo Vodopianov 	nt4ga_stat_t *p_nt4ga_stat = &p_nt_drv->adapter_info.nt4ga_stat;
1932a1ba8c47SDanylo Vodopianov 	nthw_stat_t *p_nthw_stat = p_nt4ga_stat->mp_nthw_stat;
1933a1ba8c47SDanylo Vodopianov 	const char *const p_adapter_id_str = p_nt_drv->adapter_info.mp_adapter_id_str;
1934a1ba8c47SDanylo Vodopianov 	(void)p_adapter_id_str;
1935a1ba8c47SDanylo Vodopianov 
1936a1ba8c47SDanylo Vodopianov 	if (!p_nthw_stat)
1937a1ba8c47SDanylo Vodopianov 		return THREAD_RETURN;
1938a1ba8c47SDanylo Vodopianov 
1939a1ba8c47SDanylo Vodopianov 	NT_LOG_DBGX(DBG, NTNIC, "%s: begin", p_adapter_id_str);
1940a1ba8c47SDanylo Vodopianov 
1941a1ba8c47SDanylo Vodopianov 	assert(p_nthw_stat);
1942a1ba8c47SDanylo Vodopianov 
1943a1ba8c47SDanylo Vodopianov 	while (!p_drv->ntdrv.b_shutdown) {
1944a1ba8c47SDanylo Vodopianov 		nt_os_wait_usec(10 * 1000);
1945a1ba8c47SDanylo Vodopianov 
1946a1ba8c47SDanylo Vodopianov 		nthw_stat_trigger(p_nthw_stat);
1947a1ba8c47SDanylo Vodopianov 
1948a1ba8c47SDanylo Vodopianov 		uint32_t loop = 0;
1949a1ba8c47SDanylo Vodopianov 
1950a1ba8c47SDanylo Vodopianov 		while ((!p_drv->ntdrv.b_shutdown) &&
1951a1ba8c47SDanylo Vodopianov 			(*p_nthw_stat->mp_timestamp == (uint64_t)-1)) {
1952a1ba8c47SDanylo Vodopianov 			nt_os_wait_usec(1 * 100);
1953a1ba8c47SDanylo Vodopianov 
1954a1ba8c47SDanylo Vodopianov 			if (rte_log_get_level(nt_log_ntnic) == RTE_LOG_DEBUG &&
1955a1ba8c47SDanylo Vodopianov 				(++loop & 0x3fff) == 0) {
1956a1ba8c47SDanylo Vodopianov 				if (p_nt4ga_stat->mp_nthw_rpf) {
1957a1ba8c47SDanylo Vodopianov 					NT_LOG(ERR, NTNIC, "Statistics DMA frozen");
1958a1ba8c47SDanylo Vodopianov 
1959a1ba8c47SDanylo Vodopianov 				} else if (p_nt4ga_stat->mp_nthw_rmc) {
1960a1ba8c47SDanylo Vodopianov 					uint32_t sf_ram_of =
1961a1ba8c47SDanylo Vodopianov 						nthw_rmc_get_status_sf_ram_of(p_nt4ga_stat
1962a1ba8c47SDanylo Vodopianov 							->mp_nthw_rmc);
1963a1ba8c47SDanylo Vodopianov 					uint32_t descr_fifo_of =
1964a1ba8c47SDanylo Vodopianov 						nthw_rmc_get_status_descr_fifo_of(p_nt4ga_stat
1965a1ba8c47SDanylo Vodopianov 							->mp_nthw_rmc);
1966a1ba8c47SDanylo Vodopianov 
1967a1ba8c47SDanylo Vodopianov 					uint32_t dbg_merge =
1968a1ba8c47SDanylo Vodopianov 						nthw_rmc_get_dbg_merge(p_nt4ga_stat->mp_nthw_rmc);
1969a1ba8c47SDanylo Vodopianov 					uint32_t mac_if_err =
1970a1ba8c47SDanylo Vodopianov 						nthw_rmc_get_mac_if_err(p_nt4ga_stat->mp_nthw_rmc);
1971a1ba8c47SDanylo Vodopianov 
1972a1ba8c47SDanylo Vodopianov 					NT_LOG(ERR, NTNIC, "Statistics DMA frozen");
1973a1ba8c47SDanylo Vodopianov 					NT_LOG(ERR, NTNIC, "SF RAM Overflow     : %08x",
1974a1ba8c47SDanylo Vodopianov 						sf_ram_of);
1975a1ba8c47SDanylo Vodopianov 					NT_LOG(ERR, NTNIC, "Descr Fifo Overflow : %08x",
1976a1ba8c47SDanylo Vodopianov 						descr_fifo_of);
1977a1ba8c47SDanylo Vodopianov 					NT_LOG(ERR, NTNIC, "DBG Merge           : %08x",
1978a1ba8c47SDanylo Vodopianov 						dbg_merge);
1979a1ba8c47SDanylo Vodopianov 					NT_LOG(ERR, NTNIC, "MAC If Errors       : %08x",
1980a1ba8c47SDanylo Vodopianov 						mac_if_err);
1981a1ba8c47SDanylo Vodopianov 				}
1982a1ba8c47SDanylo Vodopianov 			}
1983a1ba8c47SDanylo Vodopianov 		}
1984a1ba8c47SDanylo Vodopianov 
1985a1ba8c47SDanylo Vodopianov 		/* Check then collect */
1986a1ba8c47SDanylo Vodopianov 		{
19872407c755SSerhii Iliushyk 			rte_spinlock_lock(&p_nt_drv->stat_lck);
1988a1ba8c47SDanylo Vodopianov 			nt4ga_stat_ops->nt4ga_stat_collect(&p_nt_drv->adapter_info, p_nt4ga_stat);
19892407c755SSerhii Iliushyk 			rte_spinlock_unlock(&p_nt_drv->stat_lck);
1990a1ba8c47SDanylo Vodopianov 		}
1991a1ba8c47SDanylo Vodopianov 	}
1992a1ba8c47SDanylo Vodopianov 
1993a1ba8c47SDanylo Vodopianov 	NT_LOG_DBGX(DBG, NTNIC, "%s: end", p_adapter_id_str);
1994a1ba8c47SDanylo Vodopianov 	return THREAD_RETURN;
1995a1ba8c47SDanylo Vodopianov }
1996a1ba8c47SDanylo Vodopianov 
1997c5cfe765SSerhii Iliushyk static int
199878b8b4abSSerhii Iliushyk nthw_pci_dev_init(struct rte_pci_device *pci_dev)
1999c5cfe765SSerhii Iliushyk {
2000b01eb812SDanylo Vodopianov 	const struct flow_filter_ops *flow_filter_ops = get_flow_filter_ops();
2001b01eb812SDanylo Vodopianov 
2002b01eb812SDanylo Vodopianov 	if (flow_filter_ops == NULL) {
2003b01eb812SDanylo Vodopianov 		NT_LOG_DBGX(ERR, NTNIC, "flow_filter module uninitialized");
2004b01eb812SDanylo Vodopianov 		/* Return statement is not necessary here to allow traffic processing by SW  */
2005b01eb812SDanylo Vodopianov 	}
2006b01eb812SDanylo Vodopianov 
200796c8249bSDanylo Vodopianov 	const struct profile_inline_ops *profile_inline_ops = get_profile_inline_ops();
200896c8249bSDanylo Vodopianov 
200996c8249bSDanylo Vodopianov 	if (profile_inline_ops == NULL) {
201096c8249bSDanylo Vodopianov 		NT_LOG_DBGX(ERR, NTNIC, "profile_inline module uninitialized");
201196c8249bSDanylo Vodopianov 		/* Return statement is not necessary here to allow traffic processing by SW  */
201296c8249bSDanylo Vodopianov 	}
201396c8249bSDanylo Vodopianov 
2014c1c13953SSerhii Iliushyk 	nt_vfio_init();
20159147e9f9SSerhii Iliushyk 	const struct port_ops *port_ops = get_port_ops();
20169147e9f9SSerhii Iliushyk 
20179147e9f9SSerhii Iliushyk 	if (port_ops == NULL) {
20183489b87bSDanylo Vodopianov 		NT_LOG(ERR, NTNIC, "Link management module uninitialized");
20199147e9f9SSerhii Iliushyk 		return -1;
20209147e9f9SSerhii Iliushyk 	}
2021c1c13953SSerhii Iliushyk 
2022d3dc3627SSerhii Iliushyk 	const struct adapter_ops *adapter_ops = get_adapter_ops();
2023d3dc3627SSerhii Iliushyk 
2024d3dc3627SSerhii Iliushyk 	if (adapter_ops == NULL) {
20253489b87bSDanylo Vodopianov 		NT_LOG(ERR, NTNIC, "Adapter module uninitialized");
2026d3dc3627SSerhii Iliushyk 		return -1;
2027d3dc3627SSerhii Iliushyk 	}
2028d3dc3627SSerhii Iliushyk 
2029fe91ade9SDanylo Vodopianov 	int res;
2030c93ef6edSSerhii Iliushyk 	struct drv_s *p_drv;
2031c93ef6edSSerhii Iliushyk 	ntdrv_4ga_t *p_nt_drv;
2032ddf184d0SSerhii Iliushyk 	hw_info_t *p_hw_info;
2033ddf184d0SSerhii Iliushyk 	fpga_info_t *fpga_info;
203478b8b4abSSerhii Iliushyk 	uint32_t n_port_mask = -1;	/* All ports enabled by default */
2035c93ef6edSSerhii Iliushyk 	uint32_t nb_rx_queues = 1;
2036c93ef6edSSerhii Iliushyk 	uint32_t nb_tx_queues = 1;
2037b01eb812SDanylo Vodopianov 	uint32_t exception_path = 0;
2038fe91ade9SDanylo Vodopianov 	struct flow_queue_id_s queue_ids[MAX_QUEUES];
203978b8b4abSSerhii Iliushyk 	int n_phy_ports;
20409147e9f9SSerhii Iliushyk 	struct port_link_speed pls_mbps[NUM_ADAPTER_PORTS_MAX] = { 0 };
20419147e9f9SSerhii Iliushyk 	int num_port_speeds = 0;
2042b01eb812SDanylo Vodopianov 	enum flow_eth_dev_profile profile = FLOW_ETH_DEV_PROFILE_INLINE;
2043b01eb812SDanylo Vodopianov 
20443489b87bSDanylo Vodopianov 	NT_LOG_DBGX(DBG, NTNIC, "Dev %s PF #%i Init : %02x:%02x:%i", pci_dev->name,
204578b8b4abSSerhii Iliushyk 		pci_dev->addr.function, pci_dev->addr.bus, pci_dev->addr.devid,
204678b8b4abSSerhii Iliushyk 		pci_dev->addr.function);
204778b8b4abSSerhii Iliushyk 
2048fe91ade9SDanylo Vodopianov 	/*
2049fe91ade9SDanylo Vodopianov 	 * Process options/arguments
2050fe91ade9SDanylo Vodopianov 	 */
2051fe91ade9SDanylo Vodopianov 	if (pci_dev->device.devargs && pci_dev->device.devargs->args) {
2052fe91ade9SDanylo Vodopianov 		int kvargs_count;
2053fe91ade9SDanylo Vodopianov 		struct rte_kvargs *kvlist =
2054fe91ade9SDanylo Vodopianov 			rte_kvargs_parse(pci_dev->device.devargs->args, valid_arguments);
2055fe91ade9SDanylo Vodopianov 
2056fe91ade9SDanylo Vodopianov 		if (kvlist == NULL)
2057fe91ade9SDanylo Vodopianov 			return -1;
2058fe91ade9SDanylo Vodopianov 
2059fe91ade9SDanylo Vodopianov 		/*
2060fe91ade9SDanylo Vodopianov 		 * Argument: help
2061fe91ade9SDanylo Vodopianov 		 * NOTE: this argument/option check should be the first as it will stop
2062fe91ade9SDanylo Vodopianov 		 * execution after producing its output
2063fe91ade9SDanylo Vodopianov 		 */
2064fe91ade9SDanylo Vodopianov 		{
2065fe91ade9SDanylo Vodopianov 			if (rte_kvargs_get(kvlist, ETH_DEV_NTNIC_HELP_ARG)) {
2066fe91ade9SDanylo Vodopianov 				size_t i;
2067fe91ade9SDanylo Vodopianov 
2068fe91ade9SDanylo Vodopianov 				for (i = 0; i < RTE_DIM(valid_arguments); i++)
2069fe91ade9SDanylo Vodopianov 					if (valid_arguments[i] == NULL)
2070fe91ade9SDanylo Vodopianov 						break;
2071fe91ade9SDanylo Vodopianov 
2072fe91ade9SDanylo Vodopianov 				exit(0);
2073fe91ade9SDanylo Vodopianov 			}
2074fe91ade9SDanylo Vodopianov 		}
2075fe91ade9SDanylo Vodopianov 
2076fe91ade9SDanylo Vodopianov 		/*
2077fe91ade9SDanylo Vodopianov 		 * rxq option/argument
2078fe91ade9SDanylo Vodopianov 		 * The number of rxq (hostbuffers) allocated in memory.
2079fe91ade9SDanylo Vodopianov 		 * Default is 32 RX Hostbuffers
2080fe91ade9SDanylo Vodopianov 		 */
2081fe91ade9SDanylo Vodopianov 		kvargs_count = rte_kvargs_count(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG);
2082fe91ade9SDanylo Vodopianov 
2083fe91ade9SDanylo Vodopianov 		if (kvargs_count != 0) {
2084fe91ade9SDanylo Vodopianov 			assert(kvargs_count == 1);
2085fe91ade9SDanylo Vodopianov 			res = rte_kvargs_process(kvlist, ETH_DEV_NTHW_RXQUEUES_ARG, &string_to_u32,
2086fe91ade9SDanylo Vodopianov 					&nb_rx_queues);
2087fe91ade9SDanylo Vodopianov 
2088fe91ade9SDanylo Vodopianov 			if (res < 0) {
2089fe91ade9SDanylo Vodopianov 				NT_LOG_DBGX(ERR, NTNIC,
2090fe91ade9SDanylo Vodopianov 					"problem with command line arguments: res=%d",
2091fe91ade9SDanylo Vodopianov 					res);
2092fe91ade9SDanylo Vodopianov 				return -1;
2093fe91ade9SDanylo Vodopianov 			}
2094fe91ade9SDanylo Vodopianov 
2095fe91ade9SDanylo Vodopianov 			NT_LOG_DBGX(DBG, NTNIC, "devargs: %s=%u",
2096fe91ade9SDanylo Vodopianov 				ETH_DEV_NTHW_RXQUEUES_ARG, nb_rx_queues);
2097fe91ade9SDanylo Vodopianov 		}
2098fe91ade9SDanylo Vodopianov 
2099fe91ade9SDanylo Vodopianov 		/*
2100fe91ade9SDanylo Vodopianov 		 * txq option/argument
2101fe91ade9SDanylo Vodopianov 		 * The number of txq (hostbuffers) allocated in memory.
2102fe91ade9SDanylo Vodopianov 		 * Default is 32 TX Hostbuffers
2103fe91ade9SDanylo Vodopianov 		 */
2104fe91ade9SDanylo Vodopianov 		kvargs_count = rte_kvargs_count(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG);
2105fe91ade9SDanylo Vodopianov 
2106fe91ade9SDanylo Vodopianov 		if (kvargs_count != 0) {
2107fe91ade9SDanylo Vodopianov 			assert(kvargs_count == 1);
2108fe91ade9SDanylo Vodopianov 			res = rte_kvargs_process(kvlist, ETH_DEV_NTHW_TXQUEUES_ARG, &string_to_u32,
2109fe91ade9SDanylo Vodopianov 					&nb_tx_queues);
2110fe91ade9SDanylo Vodopianov 
2111fe91ade9SDanylo Vodopianov 			if (res < 0) {
2112fe91ade9SDanylo Vodopianov 				NT_LOG_DBGX(ERR, NTNIC,
2113fe91ade9SDanylo Vodopianov 					"problem with command line arguments: res=%d",
2114fe91ade9SDanylo Vodopianov 					res);
2115fe91ade9SDanylo Vodopianov 				return -1;
2116fe91ade9SDanylo Vodopianov 			}
2117fe91ade9SDanylo Vodopianov 
2118fe91ade9SDanylo Vodopianov 			NT_LOG_DBGX(DBG, NTNIC, "devargs: %s=%u",
2119fe91ade9SDanylo Vodopianov 				ETH_DEV_NTHW_TXQUEUES_ARG, nb_tx_queues);
2120fe91ade9SDanylo Vodopianov 		}
2121fe91ade9SDanylo Vodopianov 	}
2122fe91ade9SDanylo Vodopianov 
2123c1c13953SSerhii Iliushyk 
2124c93ef6edSSerhii Iliushyk 	/* alloc */
2125c93ef6edSSerhii Iliushyk 	p_drv = rte_zmalloc_socket(pci_dev->name, sizeof(struct drv_s), RTE_CACHE_LINE_SIZE,
2126c93ef6edSSerhii Iliushyk 			pci_dev->device.numa_node);
2127c93ef6edSSerhii Iliushyk 
2128c93ef6edSSerhii Iliushyk 	if (!p_drv) {
21293489b87bSDanylo Vodopianov 		NT_LOG_DBGX(ERR, NTNIC, "%s: error %d",
2130c93ef6edSSerhii Iliushyk 			(pci_dev->name[0] ? pci_dev->name : "NA"), -1);
2131c93ef6edSSerhii Iliushyk 		return -1;
2132c93ef6edSSerhii Iliushyk 	}
2133c93ef6edSSerhii Iliushyk 
2134c1c13953SSerhii Iliushyk 	/* Setup VFIO context */
2135c1c13953SSerhii Iliushyk 	int vfio = nt_vfio_setup(pci_dev);
2136c1c13953SSerhii Iliushyk 
2137c1c13953SSerhii Iliushyk 	if (vfio < 0) {
21383489b87bSDanylo Vodopianov 		NT_LOG_DBGX(ERR, NTNIC, "%s: vfio_setup error %d",
2139c1c13953SSerhii Iliushyk 			(pci_dev->name[0] ? pci_dev->name : "NA"), -1);
2140c93ef6edSSerhii Iliushyk 		rte_free(p_drv);
2141c1c13953SSerhii Iliushyk 		return -1;
2142c1c13953SSerhii Iliushyk 	}
2143c1c13953SSerhii Iliushyk 
2144c93ef6edSSerhii Iliushyk 	/* context */
2145c93ef6edSSerhii Iliushyk 	p_nt_drv = &p_drv->ntdrv;
2146ddf184d0SSerhii Iliushyk 	p_hw_info = &p_nt_drv->adapter_info.hw_info;
2147ddf184d0SSerhii Iliushyk 	fpga_info = &p_nt_drv->adapter_info.fpga_info;
2148c93ef6edSSerhii Iliushyk 
2149c93ef6edSSerhii Iliushyk 	p_drv->p_dev = pci_dev;
2150c93ef6edSSerhii Iliushyk 
2151c93ef6edSSerhii Iliushyk 	/* Set context for NtDrv */
2152c93ef6edSSerhii Iliushyk 	p_nt_drv->pciident = BDF_TO_PCIIDENT(pci_dev->addr.domain, pci_dev->addr.bus,
2153c93ef6edSSerhii Iliushyk 			pci_dev->addr.devid, pci_dev->addr.function);
2154d3dc3627SSerhii Iliushyk 	p_nt_drv->adapter_info.n_rx_host_buffers = nb_rx_queues;
2155d3dc3627SSerhii Iliushyk 	p_nt_drv->adapter_info.n_tx_host_buffers = nb_tx_queues;
2156d3dc3627SSerhii Iliushyk 
2157ddf184d0SSerhii Iliushyk 	fpga_info->bar0_addr = (void *)pci_dev->mem_resource[0].addr;
2158ddf184d0SSerhii Iliushyk 	fpga_info->bar0_size = pci_dev->mem_resource[0].len;
2159ddf184d0SSerhii Iliushyk 	fpga_info->numa_node = pci_dev->device.numa_node;
2160ddf184d0SSerhii Iliushyk 	fpga_info->pciident = p_nt_drv->pciident;
2161ddf184d0SSerhii Iliushyk 	fpga_info->adapter_no = p_drv->adapter_no;
2162d3dc3627SSerhii Iliushyk 
2163d3dc3627SSerhii Iliushyk 	p_nt_drv->adapter_info.hw_info.pci_class_id = pci_dev->id.class_id;
2164d3dc3627SSerhii Iliushyk 	p_nt_drv->adapter_info.hw_info.pci_vendor_id = pci_dev->id.vendor_id;
2165d3dc3627SSerhii Iliushyk 	p_nt_drv->adapter_info.hw_info.pci_device_id = pci_dev->id.device_id;
2166d3dc3627SSerhii Iliushyk 	p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id = pci_dev->id.subsystem_vendor_id;
2167d3dc3627SSerhii Iliushyk 	p_nt_drv->adapter_info.hw_info.pci_sub_device_id = pci_dev->id.subsystem_device_id;
2168d3dc3627SSerhii Iliushyk 
21693489b87bSDanylo Vodopianov 	NT_LOG(DBG, NTNIC, "%s: " PCIIDENT_PRINT_STR " %04X:%04X: %04X:%04X:",
2170d3dc3627SSerhii Iliushyk 		p_nt_drv->adapter_info.mp_adapter_id_str, PCIIDENT_TO_DOMAIN(p_nt_drv->pciident),
2171d3dc3627SSerhii Iliushyk 		PCIIDENT_TO_BUSNR(p_nt_drv->pciident), PCIIDENT_TO_DEVNR(p_nt_drv->pciident),
2172d3dc3627SSerhii Iliushyk 		PCIIDENT_TO_FUNCNR(p_nt_drv->pciident),
2173d3dc3627SSerhii Iliushyk 		p_nt_drv->adapter_info.hw_info.pci_vendor_id,
2174d3dc3627SSerhii Iliushyk 		p_nt_drv->adapter_info.hw_info.pci_device_id,
2175d3dc3627SSerhii Iliushyk 		p_nt_drv->adapter_info.hw_info.pci_sub_vendor_id,
2176d3dc3627SSerhii Iliushyk 		p_nt_drv->adapter_info.hw_info.pci_sub_device_id);
2177c93ef6edSSerhii Iliushyk 
2178c93ef6edSSerhii Iliushyk 	p_nt_drv->b_shutdown = false;
2179d3dc3627SSerhii Iliushyk 	p_nt_drv->adapter_info.pb_shutdown = &p_nt_drv->b_shutdown;
2180c93ef6edSSerhii Iliushyk 
21819147e9f9SSerhii Iliushyk 	for (int i = 0; i < num_port_speeds; ++i) {
21829147e9f9SSerhii Iliushyk 		struct adapter_info_s *p_adapter_info = &p_nt_drv->adapter_info;
21839147e9f9SSerhii Iliushyk 		nt_link_speed_t link_speed = convert_link_speed(pls_mbps[i].link_speed);
21849147e9f9SSerhii Iliushyk 		port_ops->set_link_speed(p_adapter_info, i, link_speed);
21859147e9f9SSerhii Iliushyk 	}
21869147e9f9SSerhii Iliushyk 
2187c93ef6edSSerhii Iliushyk 	/* store context */
2188c93ef6edSSerhii Iliushyk 	store_pdrv(p_drv);
2189c93ef6edSSerhii Iliushyk 
2190d3dc3627SSerhii Iliushyk 	/* initialize nt4ga nthw fpga module instance in drv */
2191d3dc3627SSerhii Iliushyk 	int err = adapter_ops->init(&p_nt_drv->adapter_info);
2192d3dc3627SSerhii Iliushyk 
2193d3dc3627SSerhii Iliushyk 	if (err != 0) {
21943489b87bSDanylo Vodopianov 		NT_LOG(ERR, NTNIC, "%s: Cannot initialize the adapter instance",
2195d3dc3627SSerhii Iliushyk 			p_nt_drv->adapter_info.mp_adapter_id_str);
2196d3dc3627SSerhii Iliushyk 		return -1;
2197d3dc3627SSerhii Iliushyk 	}
2198d3dc3627SSerhii Iliushyk 
2199c35c06fbSDanylo Vodopianov 	const struct meter_ops_s *meter_ops = get_meter_ops();
2200c35c06fbSDanylo Vodopianov 
2201c35c06fbSDanylo Vodopianov 	if (meter_ops != NULL)
2202c35c06fbSDanylo Vodopianov 		nthw_eth_dev_ops.mtr_ops_get = meter_ops->eth_mtr_ops_get;
2203c35c06fbSDanylo Vodopianov 
2204c35c06fbSDanylo Vodopianov 	else
2205c35c06fbSDanylo Vodopianov 		NT_LOG(DBG, NTNIC, "Meter module is not initialized");
2206c35c06fbSDanylo Vodopianov 
2207b0cd36e9SDanylo Vodopianov 	/* Initialize the queue system */
2208b0cd36e9SDanylo Vodopianov 	if (err == 0) {
2209b0cd36e9SDanylo Vodopianov 		sg_ops = get_sg_ops();
2210b0cd36e9SDanylo Vodopianov 
2211b0cd36e9SDanylo Vodopianov 		if (sg_ops != NULL) {
2212b0cd36e9SDanylo Vodopianov 			err = sg_ops->nthw_virt_queue_init(fpga_info);
2213b0cd36e9SDanylo Vodopianov 
2214b0cd36e9SDanylo Vodopianov 			if (err != 0) {
2215b0cd36e9SDanylo Vodopianov 				NT_LOG(ERR, NTNIC,
2216b0cd36e9SDanylo Vodopianov 					"%s: Cannot initialize scatter-gather queues",
2217b0cd36e9SDanylo Vodopianov 					p_nt_drv->adapter_info.mp_adapter_id_str);
2218b0cd36e9SDanylo Vodopianov 
2219b0cd36e9SDanylo Vodopianov 			} else {
2220b0cd36e9SDanylo Vodopianov 				NT_LOG(DBG, NTNIC, "%s: Initialized scatter-gather queues",
2221b0cd36e9SDanylo Vodopianov 					p_nt_drv->adapter_info.mp_adapter_id_str);
2222b0cd36e9SDanylo Vodopianov 			}
2223b0cd36e9SDanylo Vodopianov 
2224b0cd36e9SDanylo Vodopianov 		} else {
2225b0cd36e9SDanylo Vodopianov 			NT_LOG_DBGX(DBG, NTNIC, "SG module is not initialized");
2226b0cd36e9SDanylo Vodopianov 		}
2227b0cd36e9SDanylo Vodopianov 	}
2228b0cd36e9SDanylo Vodopianov 
2229d3dc3627SSerhii Iliushyk 	/* Start ctrl, monitor, stat thread only for primary process. */
2230d3dc3627SSerhii Iliushyk 	if (err == 0) {
2231d3dc3627SSerhii Iliushyk 		/* mp_adapter_id_str is initialized after nt4ga_adapter_init(p_nt_drv) */
2232d3dc3627SSerhii Iliushyk 		const char *const p_adapter_id_str = p_nt_drv->adapter_info.mp_adapter_id_str;
2233d3dc3627SSerhii Iliushyk 		(void)p_adapter_id_str;
2234ddf184d0SSerhii Iliushyk 		NT_LOG(DBG, NTNIC,
22353489b87bSDanylo Vodopianov 			"%s: %s: AdapterPCI=" PCIIDENT_PRINT_STR " Hw=0x%02X_rev%d PhyPorts=%d",
2236ddf184d0SSerhii Iliushyk 			(pci_dev->name[0] ? pci_dev->name : "NA"), p_adapter_id_str,
2237ddf184d0SSerhii Iliushyk 			PCIIDENT_TO_DOMAIN(p_nt_drv->adapter_info.fpga_info.pciident),
2238ddf184d0SSerhii Iliushyk 			PCIIDENT_TO_BUSNR(p_nt_drv->adapter_info.fpga_info.pciident),
2239ddf184d0SSerhii Iliushyk 			PCIIDENT_TO_DEVNR(p_nt_drv->adapter_info.fpga_info.pciident),
2240ddf184d0SSerhii Iliushyk 			PCIIDENT_TO_FUNCNR(p_nt_drv->adapter_info.fpga_info.pciident),
2241ddf184d0SSerhii Iliushyk 			p_hw_info->hw_platform_id, fpga_info->nthw_hw_info.hw_id,
2242ddf184d0SSerhii Iliushyk 			fpga_info->n_phy_ports);
2243d3dc3627SSerhii Iliushyk 
2244d3dc3627SSerhii Iliushyk 	} else {
22453489b87bSDanylo Vodopianov 		NT_LOG_DBGX(ERR, NTNIC, "%s: error=%d",
2246d3dc3627SSerhii Iliushyk 			(pci_dev->name[0] ? pci_dev->name : "NA"), err);
2247d3dc3627SSerhii Iliushyk 		return -1;
2248d3dc3627SSerhii Iliushyk 	}
2249d3dc3627SSerhii Iliushyk 
225096c8249bSDanylo Vodopianov 	if (profile_inline_ops != NULL && fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
225196c8249bSDanylo Vodopianov 		profile_inline_ops->flm_setup_queues();
225296c8249bSDanylo Vodopianov 		res = THREAD_CTRL_CREATE(&p_nt_drv->flm_thread, "ntnic-nt_flm_update_thr",
225396c8249bSDanylo Vodopianov 			adapter_flm_update_thread_fn, (void *)p_drv);
225496c8249bSDanylo Vodopianov 
225596c8249bSDanylo Vodopianov 		if (res) {
225696c8249bSDanylo Vodopianov 			NT_LOG_DBGX(ERR, NTNIC, "%s: error=%d",
225796c8249bSDanylo Vodopianov 				(pci_dev->name[0] ? pci_dev->name : "NA"), res);
225896c8249bSDanylo Vodopianov 			return -1;
225996c8249bSDanylo Vodopianov 		}
226096c8249bSDanylo Vodopianov 	}
226196c8249bSDanylo Vodopianov 
22622407c755SSerhii Iliushyk 	rte_spinlock_init(&p_nt_drv->stat_lck);
2263a1ba8c47SDanylo Vodopianov 	res = THREAD_CTRL_CREATE(&p_nt_drv->stat_thread, "nt4ga_stat_thr", adapter_stat_thread_fn,
2264a1ba8c47SDanylo Vodopianov 			(void *)p_drv);
2265a1ba8c47SDanylo Vodopianov 
2266a1ba8c47SDanylo Vodopianov 	if (res) {
2267a1ba8c47SDanylo Vodopianov 		NT_LOG(ERR, NTNIC, "%s: error=%d",
2268a1ba8c47SDanylo Vodopianov 			(pci_dev->name[0] ? pci_dev->name : "NA"), res);
2269a1ba8c47SDanylo Vodopianov 		return -1;
2270a1ba8c47SDanylo Vodopianov 	}
2271a1ba8c47SDanylo Vodopianov 
2272ddf184d0SSerhii Iliushyk 	n_phy_ports = fpga_info->n_phy_ports;
227378b8b4abSSerhii Iliushyk 
227478b8b4abSSerhii Iliushyk 	for (int n_intf_no = 0; n_intf_no < n_phy_ports; n_intf_no++) {
2275d3dc3627SSerhii Iliushyk 		const char *const p_port_id_str = p_nt_drv->adapter_info.mp_port_id_str[n_intf_no];
2276d3dc3627SSerhii Iliushyk 		(void)p_port_id_str;
2277c93ef6edSSerhii Iliushyk 		struct pmd_internals *internals = NULL;
227878b8b4abSSerhii Iliushyk 		struct rte_eth_dev *eth_dev = NULL;
227978b8b4abSSerhii Iliushyk 		char name[32];
2280fe91ade9SDanylo Vodopianov 		int i;
228178b8b4abSSerhii Iliushyk 
2282d3dc3627SSerhii Iliushyk 		if ((1 << n_intf_no) & ~n_port_mask) {
22833489b87bSDanylo Vodopianov 			NT_LOG_DBGX(DBG, NTNIC,
22843489b87bSDanylo Vodopianov 				"%s: interface #%d: skipping due to portmask 0x%02X",
2285d3dc3627SSerhii Iliushyk 				p_port_id_str, n_intf_no, n_port_mask);
228678b8b4abSSerhii Iliushyk 			continue;
2287d3dc3627SSerhii Iliushyk 		}
228878b8b4abSSerhii Iliushyk 
228978b8b4abSSerhii Iliushyk 		snprintf(name, sizeof(name), "ntnic%d", n_intf_no);
22903489b87bSDanylo Vodopianov 		NT_LOG_DBGX(DBG, NTNIC, "%s: interface #%d: %s: '%s'", p_port_id_str,
2291d3dc3627SSerhii Iliushyk 			n_intf_no, (pci_dev->name[0] ? pci_dev->name : "NA"), name);
229278b8b4abSSerhii Iliushyk 
2293c93ef6edSSerhii Iliushyk 		internals = rte_zmalloc_socket(name, sizeof(struct pmd_internals),
2294c93ef6edSSerhii Iliushyk 				RTE_CACHE_LINE_SIZE, pci_dev->device.numa_node);
2295c93ef6edSSerhii Iliushyk 
2296c93ef6edSSerhii Iliushyk 		if (!internals) {
22973489b87bSDanylo Vodopianov 			NT_LOG_DBGX(ERR, NTNIC, "%s: %s: error=%d",
2298c93ef6edSSerhii Iliushyk 				(pci_dev->name[0] ? pci_dev->name : "NA"), name, -1);
2299c93ef6edSSerhii Iliushyk 			return -1;
2300c93ef6edSSerhii Iliushyk 		}
2301c93ef6edSSerhii Iliushyk 
2302c93ef6edSSerhii Iliushyk 		internals->pci_dev = pci_dev;
2303c93ef6edSSerhii Iliushyk 		internals->n_intf_no = n_intf_no;
23049147e9f9SSerhii Iliushyk 		internals->type = PORT_TYPE_PHYSICAL;
2305*6019656dSOleksandr Kolomeiets 		internals->port = n_intf_no;
23069147e9f9SSerhii Iliushyk 		internals->nb_rx_queues = nb_rx_queues;
23079147e9f9SSerhii Iliushyk 		internals->nb_tx_queues = nb_tx_queues;
23089147e9f9SSerhii Iliushyk 
2309fe91ade9SDanylo Vodopianov 		/* Not used queue index as dest port in bypass - use 0x80 + port nr */
2310fe91ade9SDanylo Vodopianov 		for (i = 0; i < MAX_QUEUES; i++)
2311fe91ade9SDanylo Vodopianov 			internals->vpq[i].hw_id = -1;
2312fe91ade9SDanylo Vodopianov 
2313c93ef6edSSerhii Iliushyk 
2314c93ef6edSSerhii Iliushyk 		/* Setup queue_ids */
2315c93ef6edSSerhii Iliushyk 		if (nb_rx_queues > 1) {
2316c93ef6edSSerhii Iliushyk 			NT_LOG(DBG, NTNIC,
23173489b87bSDanylo Vodopianov 				"(%i) NTNIC configured with Rx multi queues. %i queues",
2318c93ef6edSSerhii Iliushyk 				internals->n_intf_no, nb_rx_queues);
2319c93ef6edSSerhii Iliushyk 		}
2320c93ef6edSSerhii Iliushyk 
2321c93ef6edSSerhii Iliushyk 		if (nb_tx_queues > 1) {
2322c93ef6edSSerhii Iliushyk 			NT_LOG(DBG, NTNIC,
23233489b87bSDanylo Vodopianov 				"(%i) NTNIC configured with Tx multi queues. %i queues",
2324c93ef6edSSerhii Iliushyk 				internals->n_intf_no, nb_tx_queues);
2325c93ef6edSSerhii Iliushyk 		}
2326c93ef6edSSerhii Iliushyk 
2327fe91ade9SDanylo Vodopianov 		int max_num_queues = (nb_rx_queues > nb_tx_queues) ? nb_rx_queues : nb_tx_queues;
2328fe91ade9SDanylo Vodopianov 		int start_queue = allocate_queue(max_num_queues);
2329fe91ade9SDanylo Vodopianov 
2330fe91ade9SDanylo Vodopianov 		if (start_queue < 0)
2331fe91ade9SDanylo Vodopianov 			return -1;
2332fe91ade9SDanylo Vodopianov 
2333fe91ade9SDanylo Vodopianov 		for (i = 0; i < (int)max_num_queues; i++) {
2334fe91ade9SDanylo Vodopianov 			queue_ids[i].id = i;
2335fe91ade9SDanylo Vodopianov 			queue_ids[i].hw_id = start_queue + i;
2336fe91ade9SDanylo Vodopianov 
2337fe91ade9SDanylo Vodopianov 			internals->rxq_scg[i].queue = queue_ids[i];
2338fe91ade9SDanylo Vodopianov 			/* use same index in Rx and Tx rings */
2339fe91ade9SDanylo Vodopianov 			internals->txq_scg[i].queue = queue_ids[i];
2340fe91ade9SDanylo Vodopianov 			internals->rxq_scg[i].enabled = 0;
2341fe91ade9SDanylo Vodopianov 			internals->txq_scg[i].type = internals->type;
2342fe91ade9SDanylo Vodopianov 			internals->rxq_scg[i].type = internals->type;
2343fe91ade9SDanylo Vodopianov 			internals->rxq_scg[i].port = internals->port;
2344fe91ade9SDanylo Vodopianov 		}
2345fe91ade9SDanylo Vodopianov 
2346fe91ade9SDanylo Vodopianov 		/* no tx queues - tx data goes out on phy */
2347fe91ade9SDanylo Vodopianov 		internals->vpq_nb_vq = 0;
2348fe91ade9SDanylo Vodopianov 
2349fe91ade9SDanylo Vodopianov 		for (i = 0; i < (int)nb_tx_queues; i++) {
2350fe91ade9SDanylo Vodopianov 			internals->txq_scg[i].port = internals->port;
2351fe91ade9SDanylo Vodopianov 			internals->txq_scg[i].enabled = 0;
2352fe91ade9SDanylo Vodopianov 		}
2353fe91ade9SDanylo Vodopianov 
23549147e9f9SSerhii Iliushyk 		/* Set MAC address (but only if the MAC address is permitted) */
23559147e9f9SSerhii Iliushyk 		if (n_intf_no < fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_count) {
23569147e9f9SSerhii Iliushyk 			const uint64_t mac =
23579147e9f9SSerhii Iliushyk 				fpga_info->nthw_hw_info.vpd_info.mn_mac_addr_value + n_intf_no;
23589147e9f9SSerhii Iliushyk 			internals->eth_addrs[0].addr_bytes[0] = (mac >> 40) & 0xFFu;
23599147e9f9SSerhii Iliushyk 			internals->eth_addrs[0].addr_bytes[1] = (mac >> 32) & 0xFFu;
23609147e9f9SSerhii Iliushyk 			internals->eth_addrs[0].addr_bytes[2] = (mac >> 24) & 0xFFu;
23619147e9f9SSerhii Iliushyk 			internals->eth_addrs[0].addr_bytes[3] = (mac >> 16) & 0xFFu;
23629147e9f9SSerhii Iliushyk 			internals->eth_addrs[0].addr_bytes[4] = (mac >> 8) & 0xFFu;
23639147e9f9SSerhii Iliushyk 			internals->eth_addrs[0].addr_bytes[5] = (mac >> 0) & 0xFFu;
23649147e9f9SSerhii Iliushyk 		}
23659147e9f9SSerhii Iliushyk 
236678b8b4abSSerhii Iliushyk 		eth_dev = rte_eth_dev_allocate(name);
236778b8b4abSSerhii Iliushyk 
236878b8b4abSSerhii Iliushyk 		if (!eth_dev) {
23693489b87bSDanylo Vodopianov 			NT_LOG_DBGX(ERR, NTNIC, "%s: %s: error=%d",
237078b8b4abSSerhii Iliushyk 				(pci_dev->name[0] ? pci_dev->name : "NA"), name, -1);
237178b8b4abSSerhii Iliushyk 			return -1;
237278b8b4abSSerhii Iliushyk 		}
237378b8b4abSSerhii Iliushyk 
2374b01eb812SDanylo Vodopianov 		if (flow_filter_ops != NULL) {
2375b01eb812SDanylo Vodopianov 			internals->flw_dev = flow_filter_ops->flow_get_eth_dev(0, n_intf_no,
2376b01eb812SDanylo Vodopianov 				eth_dev->data->port_id, nb_rx_queues, queue_ids,
2377b01eb812SDanylo Vodopianov 				&internals->txq_scg[0].rss_target_id, profile, exception_path);
2378b01eb812SDanylo Vodopianov 
2379b01eb812SDanylo Vodopianov 			if (!internals->flw_dev) {
2380b01eb812SDanylo Vodopianov 				NT_LOG(ERR, NTNIC,
2381b01eb812SDanylo Vodopianov 					"Error creating port. Resource exhaustion in HW");
2382b01eb812SDanylo Vodopianov 				return -1;
2383b01eb812SDanylo Vodopianov 			}
2384b01eb812SDanylo Vodopianov 		}
2385b01eb812SDanylo Vodopianov 
23869147e9f9SSerhii Iliushyk 		/* connect structs */
23879147e9f9SSerhii Iliushyk 		internals->p_drv = p_drv;
23889147e9f9SSerhii Iliushyk 		eth_dev->data->dev_private = internals;
23899147e9f9SSerhii Iliushyk 		eth_dev->data->mac_addrs = rte_malloc(NULL,
23909147e9f9SSerhii Iliushyk 					NUM_MAC_ADDRS_PER_PORT * sizeof(struct rte_ether_addr), 0);
23919147e9f9SSerhii Iliushyk 		rte_memcpy(&eth_dev->data->mac_addrs[0],
23929147e9f9SSerhii Iliushyk 					&internals->eth_addrs[0], RTE_ETHER_ADDR_LEN);
23939147e9f9SSerhii Iliushyk 
2394da25ae3cSDanylo Vodopianov 		NT_LOG_DBGX(DBG, NTNIC, "Setting up RX functions for SCG");
2395da25ae3cSDanylo Vodopianov 		eth_dev->rx_pkt_burst = eth_dev_rx_scg;
2396da25ae3cSDanylo Vodopianov 		eth_dev->tx_pkt_burst = eth_dev_tx_scg;
2397da25ae3cSDanylo Vodopianov 		eth_dev->tx_pkt_prepare = NULL;
23989147e9f9SSerhii Iliushyk 
239978b8b4abSSerhii Iliushyk 		struct rte_eth_link pmd_link;
240078b8b4abSSerhii Iliushyk 		pmd_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
240178b8b4abSSerhii Iliushyk 		pmd_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
240278b8b4abSSerhii Iliushyk 		pmd_link.link_status = RTE_ETH_LINK_DOWN;
240378b8b4abSSerhii Iliushyk 		pmd_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
240478b8b4abSSerhii Iliushyk 
240578b8b4abSSerhii Iliushyk 		eth_dev->device = &pci_dev->device;
240678b8b4abSSerhii Iliushyk 		eth_dev->data->dev_link = pmd_link;
2407c93ef6edSSerhii Iliushyk 		eth_dev->dev_ops = &nthw_eth_dev_ops;
240878b8b4abSSerhii Iliushyk 
240978b8b4abSSerhii Iliushyk 		eth_dev_pci_specific_init(eth_dev, pci_dev);
241078b8b4abSSerhii Iliushyk 		rte_eth_dev_probing_finish(eth_dev);
2411c93ef6edSSerhii Iliushyk 
2412c93ef6edSSerhii Iliushyk 		/* increase initialized ethernet devices - PF */
2413c93ef6edSSerhii Iliushyk 		p_drv->n_eth_dev_init_count++;
2414e7e49ce6SDanylo Vodopianov 
2415*6019656dSOleksandr Kolomeiets 		if (get_flow_filter_ops() != NULL) {
2416*6019656dSOleksandr Kolomeiets 			if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE &&
2417*6019656dSOleksandr Kolomeiets 				internals->flw_dev->ndev->be.tpe.ver >= 2) {
2418*6019656dSOleksandr Kolomeiets 				assert(nthw_eth_dev_ops.mtu_set == dev_set_mtu_inline ||
2419*6019656dSOleksandr Kolomeiets 					nthw_eth_dev_ops.mtu_set == NULL);
2420*6019656dSOleksandr Kolomeiets 				nthw_eth_dev_ops.mtu_set = dev_set_mtu_inline;
2421*6019656dSOleksandr Kolomeiets 				dev_set_mtu_inline(eth_dev, MTUINITVAL);
2422*6019656dSOleksandr Kolomeiets 				NT_LOG_DBGX(DBG, NTNIC, "INLINE MTU supported, tpe version %d",
2423*6019656dSOleksandr Kolomeiets 					internals->flw_dev->ndev->be.tpe.ver);
2424*6019656dSOleksandr Kolomeiets 
2425*6019656dSOleksandr Kolomeiets 			} else {
2426*6019656dSOleksandr Kolomeiets 				NT_LOG(DBG, NTNIC, "INLINE MTU not supported");
2427*6019656dSOleksandr Kolomeiets 			}
2428*6019656dSOleksandr Kolomeiets 		}
2429*6019656dSOleksandr Kolomeiets 
2430e7e49ce6SDanylo Vodopianov 		/* Port event thread */
2431e7e49ce6SDanylo Vodopianov 		if (fpga_info->profile == FPGA_INFO_PROFILE_INLINE) {
2432e7e49ce6SDanylo Vodopianov 			res = THREAD_CTRL_CREATE(&p_nt_drv->port_event_thread, "nt_port_event_thr",
2433e7e49ce6SDanylo Vodopianov 					port_event_thread_fn, (void *)internals);
2434e7e49ce6SDanylo Vodopianov 
2435e7e49ce6SDanylo Vodopianov 			if (res) {
2436e7e49ce6SDanylo Vodopianov 				NT_LOG(ERR, NTNIC, "%s: error=%d",
2437e7e49ce6SDanylo Vodopianov 					(pci_dev->name[0] ? pci_dev->name : "NA"), res);
2438e7e49ce6SDanylo Vodopianov 				return -1;
2439e7e49ce6SDanylo Vodopianov 			}
2440e7e49ce6SDanylo Vodopianov 		}
244178b8b4abSSerhii Iliushyk 	}
244278b8b4abSSerhii Iliushyk 
2443c5cfe765SSerhii Iliushyk 	return 0;
2444c5cfe765SSerhii Iliushyk }
2445c5cfe765SSerhii Iliushyk 
2446c5cfe765SSerhii Iliushyk static int
2447c5cfe765SSerhii Iliushyk nthw_pci_dev_deinit(struct rte_eth_dev *eth_dev __rte_unused)
2448c5cfe765SSerhii Iliushyk {
24493489b87bSDanylo Vodopianov 	NT_LOG_DBGX(DBG, NTNIC, "PCI device deinitialization");
2450c1c13953SSerhii Iliushyk 
2451d3dc3627SSerhii Iliushyk 	int i;
2452d3dc3627SSerhii Iliushyk 	char name[32];
2453d3dc3627SSerhii Iliushyk 
2454d3dc3627SSerhii Iliushyk 	struct pmd_internals *internals = eth_dev->data->dev_private;
2455d3dc3627SSerhii Iliushyk 	ntdrv_4ga_t *p_ntdrv = &internals->p_drv->ntdrv;
2456d3dc3627SSerhii Iliushyk 	fpga_info_t *fpga_info = &p_ntdrv->adapter_info.fpga_info;
2457d3dc3627SSerhii Iliushyk 	const int n_phy_ports = fpga_info->n_phy_ports;
2458b0cd36e9SDanylo Vodopianov 
2459b0cd36e9SDanylo Vodopianov 	/* let running threads end Rx and Tx activity */
2460b0cd36e9SDanylo Vodopianov 	if (sg_ops != NULL) {
2461b0cd36e9SDanylo Vodopianov 		nt_os_wait_usec(1 * 1000 * 1000);
2462b0cd36e9SDanylo Vodopianov 
2463b0cd36e9SDanylo Vodopianov 		while (internals) {
2464b0cd36e9SDanylo Vodopianov 			for (i = internals->nb_tx_queues - 1; i >= 0; i--) {
2465b0cd36e9SDanylo Vodopianov 				sg_ops->nthw_release_mngd_tx_virt_queue(internals->txq_scg[i].vq);
2466b0cd36e9SDanylo Vodopianov 				release_hw_virtio_queues(&internals->txq_scg[i].hwq);
2467b0cd36e9SDanylo Vodopianov 			}
2468b0cd36e9SDanylo Vodopianov 
2469b0cd36e9SDanylo Vodopianov 			for (i = internals->nb_rx_queues - 1; i >= 0; i--) {
2470b0cd36e9SDanylo Vodopianov 				sg_ops->nthw_release_mngd_rx_virt_queue(internals->rxq_scg[i].vq);
2471b0cd36e9SDanylo Vodopianov 				release_hw_virtio_queues(&internals->rxq_scg[i].hwq);
2472b0cd36e9SDanylo Vodopianov 			}
2473b0cd36e9SDanylo Vodopianov 
2474b0cd36e9SDanylo Vodopianov 			internals = internals->next;
2475b0cd36e9SDanylo Vodopianov 		}
2476b0cd36e9SDanylo Vodopianov 	}
2477b0cd36e9SDanylo Vodopianov 
2478d3dc3627SSerhii Iliushyk 	for (i = 0; i < n_phy_ports; i++) {
2479d3dc3627SSerhii Iliushyk 		sprintf(name, "ntnic%d", i);
2480d3dc3627SSerhii Iliushyk 		eth_dev = rte_eth_dev_allocated(name);
2481d3dc3627SSerhii Iliushyk 		if (eth_dev == NULL)
2482d3dc3627SSerhii Iliushyk 			continue; /* port already released */
2483d3dc3627SSerhii Iliushyk 		rte_eth_dev_release_port(eth_dev);
2484d3dc3627SSerhii Iliushyk 	}
2485d3dc3627SSerhii Iliushyk 
2486c1c13953SSerhii Iliushyk 	nt_vfio_remove(EXCEPTION_PATH_HID);
2487c5cfe765SSerhii Iliushyk 	return 0;
2488c5cfe765SSerhii Iliushyk }
2489c5cfe765SSerhii Iliushyk 
2490a1ba8c47SDanylo Vodopianov static void signal_handler_func_int(int sig)
2491a1ba8c47SDanylo Vodopianov {
2492a1ba8c47SDanylo Vodopianov 	if (sig != SIGINT) {
2493a1ba8c47SDanylo Vodopianov 		signal(sig, previous_handler);
2494a1ba8c47SDanylo Vodopianov 		raise(sig);
2495a1ba8c47SDanylo Vodopianov 		return;
2496a1ba8c47SDanylo Vodopianov 	}
2497a1ba8c47SDanylo Vodopianov 
2498a1ba8c47SDanylo Vodopianov 	kill_pmd = 1;
2499a1ba8c47SDanylo Vodopianov }
2500a1ba8c47SDanylo Vodopianov 
2501a1ba8c47SDanylo Vodopianov THREAD_FUNC shutdown_thread(void *arg __rte_unused)
2502a1ba8c47SDanylo Vodopianov {
2503a1ba8c47SDanylo Vodopianov 	while (!kill_pmd)
2504a1ba8c47SDanylo Vodopianov 		nt_os_wait_usec(100 * 1000);
2505a1ba8c47SDanylo Vodopianov 
2506a1ba8c47SDanylo Vodopianov 	NT_LOG_DBGX(DBG, NTNIC, "Shutting down because of ctrl+C");
2507a1ba8c47SDanylo Vodopianov 
2508a1ba8c47SDanylo Vodopianov 	signal(SIGINT, previous_handler);
2509a1ba8c47SDanylo Vodopianov 	raise(SIGINT);
2510a1ba8c47SDanylo Vodopianov 
2511a1ba8c47SDanylo Vodopianov 	return THREAD_RETURN;
2512a1ba8c47SDanylo Vodopianov }
2513a1ba8c47SDanylo Vodopianov 
2514a1ba8c47SDanylo Vodopianov static int init_shutdown(void)
2515a1ba8c47SDanylo Vodopianov {
2516a1ba8c47SDanylo Vodopianov 	NT_LOG(DBG, NTNIC, "Starting shutdown handler");
2517a1ba8c47SDanylo Vodopianov 	kill_pmd = 0;
2518a1ba8c47SDanylo Vodopianov 	previous_handler = signal(SIGINT, signal_handler_func_int);
2519a1ba8c47SDanylo Vodopianov 	THREAD_CREATE(&shutdown_tid, shutdown_thread, NULL);
2520a1ba8c47SDanylo Vodopianov 
2521a1ba8c47SDanylo Vodopianov 	/*
2522a1ba8c47SDanylo Vodopianov 	 * 1 time calculation of 1 sec stat update rtc cycles to prevent stat poll
2523a1ba8c47SDanylo Vodopianov 	 * flooding by OVS from multiple virtual port threads - no need to be precise
2524a1ba8c47SDanylo Vodopianov 	 */
2525a1ba8c47SDanylo Vodopianov 	uint64_t now_rtc = rte_get_tsc_cycles();
2526a1ba8c47SDanylo Vodopianov 	nt_os_wait_usec(10 * 1000);
2527a1ba8c47SDanylo Vodopianov 	rte_tsc_freq = 100 * (rte_get_tsc_cycles() - now_rtc);
2528a1ba8c47SDanylo Vodopianov 
2529a1ba8c47SDanylo Vodopianov 	return 0;
2530a1ba8c47SDanylo Vodopianov }
2531a1ba8c47SDanylo Vodopianov 
2532c5cfe765SSerhii Iliushyk static int
2533c5cfe765SSerhii Iliushyk nthw_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2534c5cfe765SSerhii Iliushyk 	struct rte_pci_device *pci_dev)
2535c5cfe765SSerhii Iliushyk {
2536c5cfe765SSerhii Iliushyk 	int ret;
253778b8b4abSSerhii Iliushyk 
25383489b87bSDanylo Vodopianov 	NT_LOG_DBGX(DBG, NTNIC, "pcidev: name: '%s'", pci_dev->name);
25393489b87bSDanylo Vodopianov 	NT_LOG_DBGX(DBG, NTNIC, "devargs: name: '%s'", pci_dev->device.name);
254078b8b4abSSerhii Iliushyk 
254178b8b4abSSerhii Iliushyk 	if (pci_dev->device.devargs) {
25423489b87bSDanylo Vodopianov 		NT_LOG_DBGX(DBG, NTNIC, "devargs: args: '%s'",
254378b8b4abSSerhii Iliushyk 			(pci_dev->device.devargs->args ? pci_dev->device.devargs->args : "NULL"));
25443489b87bSDanylo Vodopianov 		NT_LOG_DBGX(DBG, NTNIC, "devargs: data: '%s'",
254578b8b4abSSerhii Iliushyk 			(pci_dev->device.devargs->data ? pci_dev->device.devargs->data : "NULL"));
254678b8b4abSSerhii Iliushyk 	}
254778b8b4abSSerhii Iliushyk 
254878b8b4abSSerhii Iliushyk 	const int n_rte_vfio_no_io_mmu_enabled = rte_vfio_noiommu_is_enabled();
25493489b87bSDanylo Vodopianov 	NT_LOG(DBG, NTNIC, "vfio_no_iommu_enabled=%d", n_rte_vfio_no_io_mmu_enabled);
255078b8b4abSSerhii Iliushyk 
255178b8b4abSSerhii Iliushyk 	if (n_rte_vfio_no_io_mmu_enabled) {
25523489b87bSDanylo Vodopianov 		NT_LOG(ERR, NTNIC, "vfio_no_iommu_enabled=%d: this PMD needs VFIO IOMMU",
255378b8b4abSSerhii Iliushyk 			n_rte_vfio_no_io_mmu_enabled);
255478b8b4abSSerhii Iliushyk 		return -1;
255578b8b4abSSerhii Iliushyk 	}
255678b8b4abSSerhii Iliushyk 
255778b8b4abSSerhii Iliushyk 	const enum rte_iova_mode n_rte_io_va_mode = rte_eal_iova_mode();
25583489b87bSDanylo Vodopianov 	NT_LOG(DBG, NTNIC, "iova mode=%d", n_rte_io_va_mode);
255978b8b4abSSerhii Iliushyk 
256078b8b4abSSerhii Iliushyk 	NT_LOG(DBG, NTNIC,
256178b8b4abSSerhii Iliushyk 		"busid=" PCI_PRI_FMT
25623489b87bSDanylo Vodopianov 		" pciid=%04x:%04x_%04x:%04x locstr=%s @ numanode=%d: drv=%s drvalias=%s",
256378b8b4abSSerhii Iliushyk 		pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid,
256478b8b4abSSerhii Iliushyk 		pci_dev->addr.function, pci_dev->id.vendor_id, pci_dev->id.device_id,
256578b8b4abSSerhii Iliushyk 		pci_dev->id.subsystem_vendor_id, pci_dev->id.subsystem_device_id,
256678b8b4abSSerhii Iliushyk 		pci_dev->name[0] ? pci_dev->name : "NA",
256778b8b4abSSerhii Iliushyk 		pci_dev->device.numa_node,
256878b8b4abSSerhii Iliushyk 		pci_dev->driver->driver.name ? pci_dev->driver->driver.name : "NA",
256978b8b4abSSerhii Iliushyk 		pci_dev->driver->driver.alias ? pci_dev->driver->driver.alias : "NA");
257078b8b4abSSerhii Iliushyk 
257178b8b4abSSerhii Iliushyk 
2572c5cfe765SSerhii Iliushyk 	ret = nthw_pci_dev_init(pci_dev);
257378b8b4abSSerhii Iliushyk 
2574a1ba8c47SDanylo Vodopianov 	init_shutdown();
2575a1ba8c47SDanylo Vodopianov 
25763489b87bSDanylo Vodopianov 	NT_LOG_DBGX(DBG, NTNIC, "leave: ret=%d", ret);
2577c5cfe765SSerhii Iliushyk 	return ret;
2578c5cfe765SSerhii Iliushyk }
2579c5cfe765SSerhii Iliushyk 
2580c5cfe765SSerhii Iliushyk static int
2581c5cfe765SSerhii Iliushyk nthw_pci_remove(struct rte_pci_device *pci_dev)
2582c5cfe765SSerhii Iliushyk {
25833489b87bSDanylo Vodopianov 	NT_LOG_DBGX(DBG, NTNIC);
258478b8b4abSSerhii Iliushyk 
2585c93ef6edSSerhii Iliushyk 	struct drv_s *p_drv = get_pdrv_from_pci(pci_dev->addr);
2586c93ef6edSSerhii Iliushyk 	drv_deinit(p_drv);
2587c93ef6edSSerhii Iliushyk 
2588c5cfe765SSerhii Iliushyk 	return rte_eth_dev_pci_generic_remove(pci_dev, nthw_pci_dev_deinit);
2589c5cfe765SSerhii Iliushyk }
2590c5cfe765SSerhii Iliushyk 
/* PCI driver descriptor: BAR mapping is required; no extra driver flags. */
static struct rte_pci_driver rte_nthw_pmd = {
	.id_table = nthw_pci_id_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = nthw_pci_probe,
	.remove = nthw_pci_remove,
};

/* Register the PMD, its PCI id table, and its kernel module dependency (vfio-pci). */
RTE_PMD_REGISTER_PCI(net_ntnic, rte_nthw_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ntnic, nthw_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ntnic, "* vfio-pci");

/* Per-component log types (default level INFO) used by the NT_LOG macros. */
RTE_LOG_REGISTER_SUFFIX(nt_log_general, general, INFO);
RTE_LOG_REGISTER_SUFFIX(nt_log_nthw, nthw, INFO);
RTE_LOG_REGISTER_SUFFIX(nt_log_filter, filter, INFO);
RTE_LOG_REGISTER_SUFFIX(nt_log_ntnic, ntnic, INFO);
2606