xref: /dpdk/drivers/net/ngbe/ngbe_ethdev.c (revision daa02b5cddbb8e11b31d41e2bf7bb1ae64dcae2f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3  * Copyright(c) 2010-2017 Intel Corporation
4  */
5 
6 #include <errno.h>
7 #include <rte_common.h>
8 #include <ethdev_pci.h>
9 
10 #include <rte_alarm.h>
11 
12 #include "ngbe_logs.h"
13 #include "ngbe.h"
14 #include "ngbe_ethdev.h"
15 #include "ngbe_rxtx.h"
16 
/* Forward declarations for functions used before their definitions below. */
static int ngbe_dev_close(struct rte_eth_dev *dev);
static int ngbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);

static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
static int ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static void ngbe_dev_interrupt_handler(void *param);
static void ngbe_dev_interrupt_delayed_handler(void *param);
static void ngbe_configure_msix(struct rte_eth_dev *dev);
29 
/*
 * The set of PCI devices this driver supports: WangXun WX1860-family
 * 1GbE adapters. The zeroed vendor_id entry terminates the table.
 */
static const struct rte_pci_id pci_id_ngbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL2S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL4S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860NCSI) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860A1L) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, NGBE_DEV_ID_EM_WX1860AL_W) },
	{ .vendor_id = 0, /* sentinel */ },
};
48 
/* Rx descriptor ring limits reported to applications via dev_info */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_RXD_ALIGN,
};
54 
/* Tx descriptor ring limits reported to applications via dev_info */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = NGBE_RING_DESC_MAX,
	.nb_min = NGBE_RING_DESC_MIN,
	.nb_align = NGBE_TXD_ALIGN,
	.nb_seg_max = NGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = NGBE_TX_MAX_SEG,
};

/* Ethdev ops table; the definition is at the bottom of this file */
static const struct eth_dev_ops ngbe_eth_dev_ops;
64 
65 static inline int32_t
66 ngbe_pf_reset_hw(struct ngbe_hw *hw)
67 {
68 	uint32_t ctrl_ext;
69 	int32_t status;
70 
71 	status = hw->mac.reset_hw(hw);
72 
73 	ctrl_ext = rd32(hw, NGBE_PORTCTL);
74 	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
75 	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
76 	wr32(hw, NGBE_PORTCTL, ctrl_ext);
77 	ngbe_flush(hw);
78 
79 	if (status == NGBE_ERR_SFP_NOT_PRESENT)
80 		status = 0;
81 	return status;
82 }
83 
/*
 * Re-arm interrupts after a handler or reset: program the misc cause
 * enable mask, then write the recorded vector mask to IMC (presumably
 * interrupt-mask-clear, i.e. unmasking those vectors — confirm against
 * the register spec), and flush posted writes.
 */
static inline void
ngbe_enable_intr(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);

	wr32(hw, NGBE_IENMISC, intr->mask_misc);
	wr32(hw, NGBE_IMC(0), intr->mask & BIT_MASK32);
	ngbe_flush(hw);
}
94 
/*
 * Mask (disable) all interrupt vectors by setting every bit in IMS,
 * then flush so the write takes effect before the caller proceeds.
 */
static void
ngbe_disable_intr(struct ngbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32(hw, NGBE_IMS(0), NGBE_IMS_MASK);
	ngbe_flush(hw);
}
103 
/*
 * Ensure that all locks are released before first NVM or PHY access.
 * Acquire-then-release forces any semaphore left locked by a previous
 * (possibly crashed) driver instance to be freed.
 */
static void
ngbe_swfw_lock_reset(struct ngbe_hw *hw)
{
	uint16_t mask;

	/*
	 * These ones are more tricky since they are common to all ports; but
	 * swfw_sync retries last long enough (1s) to be almost sure that if
	 * lock can not be taken it is due to an improper lock of the
	 * semaphore.
	 */
	mask = NGBE_MNGSEM_SWPHY |
	       NGBE_MNGSEM_SWMBX |
	       NGBE_MNGSEM_SWFLASH;
	/* Failure to acquire means the lock was stale; release it anyway */
	if (hw->mac.acquire_swfw_sync(hw, mask) < 0)
		PMD_DRV_LOG(DEBUG, "SWFW common locks released");

	hw->mac.release_swfw_sync(hw, mask);
}
126 
/*
 * Per-port init: wire up ops and burst functions, identify the device,
 * bring the hardware through EEPROM validation and MAC init, allocate
 * MAC address tables, and register the interrupt handler.
 * Returns 0 on success or a negative errno on failure.
 */
static int
eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint32_t ctrl_ext;
	int err;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &ngbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ngbe_xmit_pkts_simple;

	/*
	 * Secondary processes reuse the state set up by the primary;
	 * only the function pointers above are needed here.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->sub_system_id = pci_dev->id.subsystem_device_id;
	ngbe_map_device_id(hw);
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "ngbe_driver", -1,
		NGBE_ISB_SIZE, NGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	/* NOTE(review): mz is not released on the error paths below —
	 * TODO confirm whether the zone should be freed on init failure.
	 */
	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = ngbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, NULL);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->mac.init_hw(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* disable interrupt until dev_start re-enables them */
	ngbe_disable_intr(hw);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ngbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ngbe",
			RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * NGBE_VMDQ_NUM_UC_MAC);
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return -ENOMEM;
	}

	ctrl_ext = rd32(hw, NGBE_PORTCTL);
	/* let hardware know driver is loaded */
	ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
	ctrl_ext |= NGBE_PORTCTL_RSTDONE;
	wr32(hw, NGBE_PORTCTL, ctrl_ext);
	ngbe_flush(hw);

	PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			(int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	rte_intr_callback_register(intr_handle,
				   ngbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* enable support intr */
	ngbe_enable_intr(eth_dev);

	return 0;
}
248 
249 static int
250 eth_ngbe_dev_uninit(struct rte_eth_dev *eth_dev)
251 {
252 	PMD_INIT_FUNC_TRACE();
253 
254 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
255 		return 0;
256 
257 	ngbe_dev_close(eth_dev);
258 
259 	return 0;
260 }
261 
262 static int
263 eth_ngbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
264 		struct rte_pci_device *pci_dev)
265 {
266 	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
267 			sizeof(struct ngbe_adapter),
268 			eth_dev_pci_specific_init, pci_dev,
269 			eth_ngbe_dev_init, NULL);
270 }
271 
272 static int eth_ngbe_pci_remove(struct rte_pci_device *pci_dev)
273 {
274 	struct rte_eth_dev *ethdev;
275 
276 	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
277 	if (ethdev == NULL)
278 		return 0;
279 
280 	return rte_eth_dev_destroy(ethdev, eth_ngbe_dev_uninit);
281 }
282 
/* PCI driver descriptor: requires BAR mapping and supports LSC interrupts */
static struct rte_pci_driver rte_ngbe_pmd = {
	.id_table = pci_id_ngbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_ngbe_pci_probe,
	.remove = eth_ngbe_pci_remove,
};
290 
291 static int
292 ngbe_dev_configure(struct rte_eth_dev *dev)
293 {
294 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
295 	struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
296 
297 	PMD_INIT_FUNC_TRACE();
298 
299 	/* set flag to update link status after init */
300 	intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
301 
302 	/*
303 	 * Initialize to TRUE. If any of Rx queues doesn't meet the bulk
304 	 * allocation Rx preconditions we will reset it.
305 	 */
306 	adapter->rx_bulk_alloc_allowed = true;
307 
308 	return 0;
309 }
310 
/*
 * Configure GPIO-based PHY interrupt reporting: set pin direction,
 * enable level-triggered GPIO interrupts, select polarity (inverted
 * for the yt8521s SFI PHY), and unmask the GPIO misc cause.
 */
static void
ngbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	wr32(hw, NGBE_GPIODIR, NGBE_GPIODIR_DDR(1));
	wr32(hw, NGBE_GPIOINTEN, NGBE_GPIOINTEN_INT(3));
	wr32(hw, NGBE_GPIOINTTYPE, NGBE_GPIOINTTYPE_LEVEL(0));
	/* Polarity differs per PHY type — values taken from vendor code */
	if (hw->phy.type == ngbe_phy_yt8521s_sfi)
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(0));
	else
		wr32(hw, NGBE_GPIOINTPOL, NGBE_GPIOINTPOL_ACT(3));

	intr->mask_misc |= NGBE_ICRMISC_GPIO;
}
327 
/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
ngbe_dev_start(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err;
	bool link_up = false, negotiate = false;
	uint32_t speed = 0;
	uint32_t allowed_speeds = 0;
	int status;
	uint32_t *link_speeds;

	PMD_INIT_FUNC_TRACE();

	/* disable uio/vfio intr/eventfd mapping */
	rte_intr_disable(intr_handle);

	/* stop adapter */
	hw->adapter_stopped = 0;
	ngbe_stop_hw(hw);

	/* reinitialize adapter, this calls reset and start */
	hw->nb_rx_queues = dev->data->nb_rx_queues;
	hw->nb_tx_queues = dev->data->nb_tx_queues;
	status = ngbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
	hw->mac.start_hw(hw);
	hw->mac.get_link_status = true;

	ngbe_dev_phy_intr_setup(dev);

	/* check and configure queue intr-vector mapping */
	if ((rte_intr_cap_multiple(intr_handle) ||
	     !RTE_ETH_DEV_SRIOV(dev).active) &&
	    dev->data->dev_conf.intr_conf.rxq != 0) {
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	/* One vector slot per Rx queue for the 1:1 queue/vector mapping */
	if (rte_intr_dp_is_en(intr_handle) && intr_handle->intr_vec == NULL) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR,
				     "Failed to allocate %d rx_queues intr_vec",
				     dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure MSI-X for sleep until Rx interrupt */
	ngbe_configure_msix(dev);

	/* initialize transmission unit */
	ngbe_dev_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	err = ngbe_dev_rx_init(dev);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Unable to initialize Rx hardware");
		goto error;
	}

	err = ngbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
		goto error;
	}

	err = hw->mac.check_link(hw, &speed, &link_up, 0);
	if (err != 0)
		goto error;
	dev->data->dev_link.link_status = link_up;

	link_speeds = &dev->data->dev_conf.link_speeds;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG)
		negotiate = true;

	err = hw->mac.get_link_capabilities(hw, &speed, &negotiate);
	if (err != 0)
		goto error;

	/* Build the set of speeds the MAC advertises as supported */
	allowed_speeds = 0;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_1GB_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_1G;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_100M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_100M;
	if (hw->mac.default_speeds & NGBE_LINK_SPEED_10M_FULL)
		allowed_speeds |= RTE_ETH_LINK_SPEED_10M;

	/* NOTE(review): on this path err is 0 from the call above, so the
	 * error log below prints 0 — consider setting err explicitly.
	 */
	if (*link_speeds & ~allowed_speeds) {
		PMD_INIT_LOG(ERR, "Invalid link setting");
		goto error;
	}

	/* Translate requested ethdev speed flags into NGBE speed bits */
	speed = 0x0;
	if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		speed = hw->mac.default_speeds;
	} else {
		if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
			speed |= NGBE_LINK_SPEED_1GB_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
			speed |= NGBE_LINK_SPEED_100M_FULL;
		if (*link_speeds & RTE_ETH_LINK_SPEED_10M)
			speed |= NGBE_LINK_SPEED_10M_FULL;
	}

	hw->phy.init_hw(hw);
	err = hw->mac.setup_link(hw, speed, link_up);
	if (err != 0)
		goto error;

	if (rte_intr_allow_others(intr_handle)) {
		ngbe_dev_misc_interrupt_setup(dev);
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			ngbe_dev_lsc_interrupt_setup(dev, TRUE);
		else
			ngbe_dev_lsc_interrupt_setup(dev, FALSE);
		ngbe_dev_macsec_interrupt_setup(dev);
		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	} else {
		rte_intr_callback_unregister(intr_handle,
					     ngbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO,
				     "LSC won't enable because of no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq != 0 &&
	    rte_intr_dp_is_en(intr_handle))
		ngbe_dev_rxq_interrupt_setup(dev);

	/* enable UIO/VFIO intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	/* resume enabled intr since HW reset */
	ngbe_enable_intr(dev);

	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
		(hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
		/* gpio0 is used to power on/off control*/
		wr32(hw, NGBE_GPIODATA, 0);
	}

	/*
	 * Update link status right before return, because it may
	 * start link configuration process in a separate thread.
	 */
	ngbe_dev_link_update(dev, 0);

	return 0;

error:
	PMD_INIT_LOG(ERR, "failure in dev start: %d", err);
	ngbe_dev_clear_queues(dev);
	return -EIO;
}
496 
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 * Idempotent — returns immediately if the adapter is already stopped.
 */
static int
ngbe_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (hw->adapter_stopped)
		return 0;

	PMD_INIT_FUNC_TRACE();

	if ((hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_M88E1512_SFP ||
		(hw->sub_system_id & NGBE_OEM_MASK) == NGBE_LY_YT8521S_SFP) {
		/* gpio0 is used to power on/off control*/
		wr32(hw, NGBE_GPIODATA, NGBE_GPIOBIT_0);
	}

	/* disable interrupts */
	ngbe_disable_intr(hw);

	/* reset the NIC */
	ngbe_pf_reset_hw(hw);
	/* temporarily mark running so ngbe_stop_hw() performs the stop;
	 * the final state is set to stopped at the end of this function
	 */
	hw->adapter_stopped = 0;

	/* stop adapter */
	ngbe_stop_hw(hw);

	ngbe_dev_clear_queues(dev);

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   ngbe_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}

	hw->adapter_stopped = true;
	dev->data->dev_started = 0;

	return 0;
}
553 
/*
 * Reset and stop device: stop the datapath, release queues, restore
 * RAR[0], unregister the interrupt callback (with retries while the
 * handler may still be running), and free MAC address tables.
 */
static int
ngbe_dev_close(struct rte_eth_dev *dev)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int retries = 0;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ngbe_pf_reset_hw(hw);

	ngbe_dev_stop(dev);

	ngbe_dev_free_queues(dev);

	/* reprogram the RAR[0] in case user changed it. */
	ngbe_set_rar(hw, 0, hw->mac.addr, 0, true);

	/* Unlock any pending hardware semaphore */
	ngbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	/* -EAGAIN means the handler is currently executing; retry with a
	 * delay until it finishes or the retry budget is exhausted
	 */
	do {
		ret = rte_intr_callback_unregister(intr_handle,
				ngbe_dev_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT) {
			break;
		} else if (ret != -EAGAIN) {
			PMD_INIT_LOG(ERR,
				"intr callback unregister failed: %d",
				ret);
		}
		rte_delay_ms(100);
	} while (retries++ < (10 + NGBE_LINK_UP_TIME));

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return ret;
}
604 
605 /*
606  * Reset PF device.
607  */
608 static int
609 ngbe_dev_reset(struct rte_eth_dev *dev)
610 {
611 	int ret;
612 
613 	ret = eth_ngbe_dev_uninit(dev);
614 	if (ret != 0)
615 		return ret;
616 
617 	ret = eth_ngbe_dev_init(dev, NULL);
618 
619 	return ret;
620 }
621 
622 static int
623 ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
624 {
625 	struct ngbe_hw *hw = ngbe_dev_hw(dev);
626 
627 	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
628 	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
629 	dev_info->min_rx_bufsize = 1024;
630 	dev_info->max_rx_pktlen = 15872;
631 
632 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
633 		.rx_thresh = {
634 			.pthresh = NGBE_DEFAULT_RX_PTHRESH,
635 			.hthresh = NGBE_DEFAULT_RX_HTHRESH,
636 			.wthresh = NGBE_DEFAULT_RX_WTHRESH,
637 		},
638 		.rx_free_thresh = NGBE_DEFAULT_RX_FREE_THRESH,
639 		.rx_drop_en = 0,
640 		.offloads = 0,
641 	};
642 
643 	dev_info->default_txconf = (struct rte_eth_txconf) {
644 		.tx_thresh = {
645 			.pthresh = NGBE_DEFAULT_TX_PTHRESH,
646 			.hthresh = NGBE_DEFAULT_TX_HTHRESH,
647 			.wthresh = NGBE_DEFAULT_TX_WTHRESH,
648 		},
649 		.tx_free_thresh = NGBE_DEFAULT_TX_FREE_THRESH,
650 		.offloads = 0,
651 	};
652 
653 	dev_info->rx_desc_lim = rx_desc_lim;
654 	dev_info->tx_desc_lim = tx_desc_lim;
655 
656 	dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_100M |
657 				RTE_ETH_LINK_SPEED_10M;
658 
659 	/* Driver-preferred Rx/Tx parameters */
660 	dev_info->default_rxportconf.burst_size = 32;
661 	dev_info->default_txportconf.burst_size = 32;
662 	dev_info->default_rxportconf.nb_queues = 1;
663 	dev_info->default_txportconf.nb_queues = 1;
664 	dev_info->default_rxportconf.ring_size = 256;
665 	dev_info->default_txportconf.ring_size = 256;
666 
667 	return 0;
668 }
669 
/* return 0 means link status changed, -1 means not changed */
int
ngbe_dev_link_update_share(struct rte_eth_dev *dev,
			    int wait_to_complete)
{
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct rte_eth_link link;
	u32 link_speed = NGBE_LINK_SPEED_UNKNOWN;
	u32 lan_speed = 0;
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	bool link_up;
	int err;
	int wait = 1;

	/* Start from a link-down default and fill in on success */
	memset(&link, 0, sizeof(link));
	link.link_status = RTE_ETH_LINK_DOWN;
	link.link_speed = RTE_ETH_SPEED_NUM_NONE;
	link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			~RTE_ETH_LINK_SPEED_AUTONEG);

	hw->mac.get_link_status = true;

	/* Link setup is still pending elsewhere; report down for now */
	if (intr->flags & NGBE_FLAG_NEED_LINK_CONFIG)
		return rte_eth_linkstatus_set(dev, &link);

	/* check if it needs to wait to complete, if lsc interrupt is enabled */
	if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
		wait = 0;

	err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
	if (err != 0) {
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
		return rte_eth_linkstatus_set(dev, &link);
	}

	if (!link_up)
		return rte_eth_linkstatus_set(dev, &link);

	intr->flags &= ~NGBE_FLAG_NEED_LINK_CONFIG;
	link.link_status = RTE_ETH_LINK_UP;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;

	/* Map the NGBE speed bit to an ethdev speed and a LAN_SPEED code */
	switch (link_speed) {
	default:
	case NGBE_LINK_SPEED_UNKNOWN:
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		break;

	case NGBE_LINK_SPEED_10M_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_10M;
		lan_speed = 0;
		break;

	case NGBE_LINK_SPEED_100M_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_100M;
		lan_speed = 1;
		break;

	case NGBE_LINK_SPEED_1GB_FULL:
		link.link_speed = RTE_ETH_SPEED_NUM_1G;
		lan_speed = 2;
		break;
	}

	/* On the PF, mirror the negotiated speed into the MAC registers */
	if (hw->is_pf) {
		wr32m(hw, NGBE_LAN_SPEED, NGBE_LAN_SPEED_MASK, lan_speed);
		if (link_speed & (NGBE_LINK_SPEED_1GB_FULL |
				NGBE_LINK_SPEED_100M_FULL |
				NGBE_LINK_SPEED_10M_FULL)) {
			wr32m(hw, NGBE_MACTXCFG, NGBE_MACTXCFG_SPEED_MASK,
				NGBE_MACTXCFG_SPEED_1G | NGBE_MACTXCFG_TE);
		}
	}

	return rte_eth_linkstatus_set(dev, &link);
}
748 
/* Ethdev link_update op: delegate to the common implementation. */
static int
ngbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	int ret;

	ret = ngbe_dev_link_update_share(dev, wait_to_complete);

	return ret;
}
754 
755 /**
756  * It clears the interrupt causes and enables the interrupt.
757  * It will be called once only during NIC initialized.
758  *
759  * @param dev
760  *  Pointer to struct rte_eth_dev.
761  * @param on
762  *  Enable or Disable.
763  *
764  * @return
765  *  - On success, zero.
766  *  - On failure, a negative value.
767  */
768 static int
769 ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
770 {
771 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
772 
773 	ngbe_dev_link_status_print(dev);
774 	if (on != 0) {
775 		intr->mask_misc |= NGBE_ICRMISC_PHY;
776 		intr->mask_misc |= NGBE_ICRMISC_GPIO;
777 	} else {
778 		intr->mask_misc &= ~NGBE_ICRMISC_PHY;
779 		intr->mask_misc &= ~NGBE_ICRMISC_GPIO;
780 	}
781 
782 	return 0;
783 }
784 
785 /**
786  * It clears the interrupt causes and enables the interrupt.
787  * It will be called once only during NIC initialized.
788  *
789  * @param dev
790  *  Pointer to struct rte_eth_dev.
791  *
792  * @return
793  *  - On success, zero.
794  *  - On failure, a negative value.
795  */
796 static int
797 ngbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
798 {
799 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
800 	u64 mask;
801 
802 	mask = NGBE_ICR_MASK;
803 	mask &= (1ULL << NGBE_MISC_VEC_ID);
804 	intr->mask |= mask;
805 	intr->mask_misc |= NGBE_ICRMISC_GPIO;
806 
807 	return 0;
808 }
809 
810 /**
811  * It clears the interrupt causes and enables the interrupt.
812  * It will be called once only during NIC initialized.
813  *
814  * @param dev
815  *  Pointer to struct rte_eth_dev.
816  *
817  * @return
818  *  - On success, zero.
819  *  - On failure, a negative value.
820  */
821 static int
822 ngbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
823 {
824 	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
825 	u64 mask;
826 
827 	mask = NGBE_ICR_MASK;
828 	mask &= ~((1ULL << NGBE_RX_VEC_START) - 1);
829 	intr->mask |= mask;
830 
831 	return 0;
832 }
833 
/**
 * Enable the link-security (MACsec) misc interrupt cause; the mask is
 * written to hardware by the next ngbe_enable_intr().
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  Always zero.
 */
static int
ngbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	intr->mask_misc |= NGBE_ICRMISC_LNKSEC;

	return 0;
}
854 
/*
 * It reads the misc causes latched in the interrupt status block (ISB)
 * and translates them into intr->flags for later processing.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t eicr;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);

	/* clear all cause mask */
	ngbe_disable_intr(hw);

	/* read-on-clear nic registers here */
	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];
	PMD_DRV_LOG(DEBUG, "eicr %x", eicr);

	intr->flags = 0;

	/* set flag for async link update */
	if (eicr & NGBE_ICRMISC_PHY)
		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	if (eicr & NGBE_ICRMISC_VFMBX)
		intr->flags |= NGBE_FLAG_MAILBOX;

	if (eicr & NGBE_ICRMISC_LNKSEC)
		intr->flags |= NGBE_FLAG_MACSEC;

	/* GPIO interrupts also signal a PHY link change */
	if (eicr & NGBE_ICRMISC_GPIO)
		intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;

	return 0;
}
896 
/**
 * It gets and then prints the current link status (up/down, speed and
 * duplex) plus the port's PCI address, at INFO/DEBUG log level.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 */
static void
ngbe_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;

	rte_eth_linkstatus_get(dev, &link);

	if (link.link_status == RTE_ETH_LINK_UP) {
		PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
	} else {
		PMD_INIT_LOG(INFO, " Port %d: Link Down",
				(int)(dev->data->port_id));
	}
	PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
}
931 
/*
 * It executes link_update after knowing an interrupt occurred. If the
 * link is transitioning, it schedules a delayed handler and temporarily
 * masks the LSC/misc causes so the state can settle.
 *
 * @param dev
 *  Pointer to struct rte_eth_dev.
 *
 * @return
 *  - On success, zero.
 *  - On failure, a negative value.
 */
static int
ngbe_dev_interrupt_action(struct rte_eth_dev *dev)
{
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	int64_t timeout;

	PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);

	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
		struct rte_eth_link link;

		/*get the link status before link update, for predicting later*/
		rte_eth_linkstatus_get(dev, &link);

		ngbe_dev_link_update(dev, 0);

		/* likely to up */
		if (link.link_status != RTE_ETH_LINK_UP)
			/* handle it 1 sec later, wait it being stable */
			timeout = NGBE_LINK_UP_CHECK_TIMEOUT;
		/* likely to down */
		else
			/* handle it 4 sec later, wait it being stable */
			timeout = NGBE_LINK_DOWN_CHECK_TIMEOUT;

		ngbe_dev_link_status_print(dev);
		/* Schedule the delayed handler; on failure leave masks alone */
		if (rte_eal_alarm_set(timeout * 1000,
				      ngbe_dev_interrupt_delayed_handler,
				      (void *)dev) < 0) {
			PMD_DRV_LOG(ERR, "Error setting alarm");
		} else {
			/* remember original mask */
			intr->mask_misc_orig = intr->mask_misc;
			/* only disable lsc interrupt */
			intr->mask_misc &= ~NGBE_ICRMISC_PHY;

			intr->mask_orig = intr->mask;
			/* only disable all misc interrupts */
			intr->mask &= ~(1ULL << NGBE_MISC_VEC_ID);
		}
	}

	PMD_DRV_LOG(DEBUG, "enable intr immediately");
	ngbe_enable_intr(dev);

	return 0;
}
989 
/**
 * Interrupt handler which shall be registered for alarm callback for delayed
 * handling specific interrupt to wait for the stable nic state. As the
 * NIC interrupt state is not stable for ngbe after link is just down,
 * it needs to wait 4 seconds to get the stable status. It finishes the
 * deferred work (link update, user callbacks), restores the interrupt
 * masks saved by ngbe_dev_interrupt_action(), and re-enables interrupts.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
ngbe_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct ngbe_interrupt *intr = ngbe_dev_intr(dev);
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t eicr;

	ngbe_disable_intr(hw);

	/* latch causes from the interrupt status block */
	eicr = ((u32 *)hw->isb_mem)[NGBE_ISB_MISC];

	if (intr->flags & NGBE_FLAG_NEED_LINK_UPDATE) {
		ngbe_dev_link_update(dev, 0);
		intr->flags &= ~NGBE_FLAG_NEED_LINK_UPDATE;
		ngbe_dev_link_status_print(dev);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
					      NULL);
	}

	if (intr->flags & NGBE_FLAG_MACSEC) {
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
					      NULL);
		intr->flags &= ~NGBE_FLAG_MACSEC;
	}

	/* restore original mask */
	intr->mask_misc = intr->mask_misc_orig;
	intr->mask_misc_orig = 0;
	intr->mask = intr->mask_orig;
	intr->mask_orig = 0;

	PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
	ngbe_enable_intr(dev);
}
1034 
/**
 * Interrupt handler triggered by NIC for handling a specific interrupt:
 * latch the causes from the ISB, then act on them.
 *
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
ngbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;

	ngbe_dev_interrupt_get_status(dev);
	ngbe_dev_interrupt_action(dev);
}
1050 
1051 /**
1052  * Set the IVAR registers, mapping interrupt causes to vectors
1053  * @param hw
1054  *  pointer to ngbe_hw struct
1055  * @direction
1056  *  0 for Rx, 1 for Tx, -1 for other causes
1057  * @queue
1058  *  queue to map the corresponding interrupt to
1059  * @msix_vector
1060  *  the vector to map to the corresponding queue
1061  */
1062 void
1063 ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
1064 		   uint8_t queue, uint8_t msix_vector)
1065 {
1066 	uint32_t tmp, idx;
1067 
1068 	if (direction == -1) {
1069 		/* other causes */
1070 		msix_vector |= NGBE_IVARMISC_VLD;
1071 		idx = 0;
1072 		tmp = rd32(hw, NGBE_IVARMISC);
1073 		tmp &= ~(0xFF << idx);
1074 		tmp |= (msix_vector << idx);
1075 		wr32(hw, NGBE_IVARMISC, tmp);
1076 	} else {
1077 		/* rx or tx causes */
1078 		/* Workround for ICR lost */
1079 		idx = ((16 * (queue & 1)) + (8 * direction));
1080 		tmp = rd32(hw, NGBE_IVAR(queue >> 1));
1081 		tmp &= ~(0xFF << idx);
1082 		tmp |= (msix_vector << idx);
1083 		wr32(hw, NGBE_IVAR(queue >> 1), tmp);
1084 	}
1085 }
1086 
/**
 * Sets up the hardware to properly generate MSI-X interrupts: enables
 * MSI-X mode in GPIE, populates the IVAR table with a 1:1 queue/vector
 * mapping, and programs the misc vector's throttling rate.
 * @hw
 *  board private structure
 */
static void
ngbe_configure_msix(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ngbe_hw *hw = ngbe_dev_hw(dev);
	uint32_t queue_id, base = NGBE_MISC_VEC_ID;
	uint32_t vec = NGBE_MISC_VEC_ID;
	uint32_t gpie;

	/*
	 * Won't configure MSI-X register if no mapping is done
	 * between intr vector and event fd
	 * but if MSI-X has been enabled already, need to configure
	 * auto clean, auto mask and throttling.
	 */
	gpie = rd32(hw, NGBE_GPIE);
	if (!rte_intr_dp_is_en(intr_handle) &&
	    !(gpie & NGBE_GPIE_MSIX))
		return;

	/* When other causes get their own vector, queues start after it */
	if (rte_intr_allow_others(intr_handle)) {
		base = NGBE_RX_VEC_START;
		vec = base;
	}

	/* setup GPIE for MSI-X mode */
	/* NOTE(review): GPIE is re-read here although it was read above —
	 * presumably harmless; confirm whether one read suffices.
	 */
	gpie = rd32(hw, NGBE_GPIE);
	gpie |= NGBE_GPIE_MSIX;
	wr32(hw, NGBE_GPIE, gpie);

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	if (rte_intr_dp_is_en(intr_handle)) {
		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
			queue_id++) {
			/* by default, 1:1 mapping */
			ngbe_set_ivar_map(hw, 0, queue_id, vec);
			intr_handle->intr_vec[queue_id] = vec;
			/* remaining queues share the last available vector */
			if (vec < base + intr_handle->nb_efd - 1)
				vec++;
		}

		ngbe_set_ivar_map(hw, -1, 1, NGBE_MISC_VEC_ID);
	}
	wr32(hw, NGBE_ITR(NGBE_MISC_VEC_ID),
			NGBE_ITR_IVAL_1G(NGBE_QUEUE_ITR_INTERVAL_DEFAULT)
			| NGBE_ITR_WRDSA);
}
1142 
/* Ethdev callback table wiring the generic API to ngbe implementations */
static const struct eth_dev_ops ngbe_eth_dev_ops = {
	.dev_configure              = ngbe_dev_configure,
	.dev_infos_get              = ngbe_dev_info_get,
	.dev_start                  = ngbe_dev_start,
	.dev_stop                   = ngbe_dev_stop,
	.dev_close                  = ngbe_dev_close,
	.dev_reset                  = ngbe_dev_reset,
	.link_update                = ngbe_dev_link_update,
	.rx_queue_start	            = ngbe_dev_rx_queue_start,
	.rx_queue_stop              = ngbe_dev_rx_queue_stop,
	.tx_queue_start	            = ngbe_dev_tx_queue_start,
	.tx_queue_stop              = ngbe_dev_tx_queue_stop,
	.rx_queue_setup             = ngbe_dev_rx_queue_setup,
	.rx_queue_release           = ngbe_dev_rx_queue_release,
	.tx_queue_setup             = ngbe_dev_tx_queue_setup,
	.tx_queue_release           = ngbe_dev_tx_queue_release,
};
1160 
/* Register the PMD, its PCI ID table and kernel-module dependencies */
RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ngbe, pci_id_ngbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ngbe, "* igb_uio | uio_pci_generic | vfio-pci");

/* Log types: quiet by default, verbose Rx/Tx only in debug builds */
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_driver, driver, NOTICE);

#ifdef RTE_ETHDEV_DEBUG_RX
	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_rx, rx, DEBUG);
#endif
#ifdef RTE_ETHDEV_DEBUG_TX
	RTE_LOG_REGISTER_SUFFIX(ngbe_logtype_tx, tx, DEBUG);
#endif
1174