1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Cesnet
3 * Copyright(c) 2019 Netcope Technologies, a.s. <info@netcope.com>
4 * All rights reserved.
5 */
6
7 #include <nfb/nfb.h>
8 #include <nfb/ndp.h>
9 #include <netcope/rxmac.h>
10 #include <netcope/txmac.h>
11
12 #include <ethdev_pci.h>
13 #include <rte_kvargs.h>
14
15 #include "nfb_stats.h"
16 #include "nfb_rx.h"
17 #include "nfb_tx.h"
18 #include "nfb_rxmode.h"
19 #include "nfb.h"
20
21 /**
22 * Default MAC addr
23 */
24 static const struct rte_ether_addr eth_addr = {
25 .addr_bytes = { 0x00, 0x11, 0x17, 0x00, 0x00, 0x00 }
26 };
27
28 /**
29 * Open all RX DMA queues
30 *
31 * @param dev
32 * Pointer to nfb device.
33 * @param[out] rxmac
34 * Pointer to output array of nc_rxmac
35 * @param[out] max_rxmac
36 * Pointer to output max index of rxmac
37 */
38 static void
nfb_nc_rxmac_init(struct nfb_device * nfb,struct nc_rxmac * rxmac[RTE_MAX_NC_RXMAC],uint16_t * max_rxmac)39 nfb_nc_rxmac_init(struct nfb_device *nfb,
40 struct nc_rxmac *rxmac[RTE_MAX_NC_RXMAC],
41 uint16_t *max_rxmac)
42 {
43 *max_rxmac = 0;
44 while ((rxmac[*max_rxmac] = nc_rxmac_open_index(nfb, *max_rxmac)))
45 ++(*max_rxmac);
46 }
47
48 /**
49 * Open all TX DMA queues
50 *
51 * @param dev
52 * Pointer to nfb device.
53 * @param[out] txmac
54 * Pointer to output array of nc_txmac
55 * @param[out] max_rxmac
56 * Pointer to output max index of txmac
57 */
58 static void
nfb_nc_txmac_init(struct nfb_device * nfb,struct nc_txmac * txmac[RTE_MAX_NC_TXMAC],uint16_t * max_txmac)59 nfb_nc_txmac_init(struct nfb_device *nfb,
60 struct nc_txmac *txmac[RTE_MAX_NC_TXMAC],
61 uint16_t *max_txmac)
62 {
63 *max_txmac = 0;
64 while ((txmac[*max_txmac] = nc_txmac_open_index(nfb, *max_txmac)))
65 ++(*max_txmac);
66 }
67
68 /**
69 * Close all RX DMA queues
70 *
71 * @param rxmac
72 * Pointer to array of nc_rxmac
73 * @param max_rxmac
74 * Maximum index of rxmac
75 */
76 static void
nfb_nc_rxmac_deinit(struct nc_rxmac * rxmac[RTE_MAX_NC_RXMAC],uint16_t max_rxmac)77 nfb_nc_rxmac_deinit(struct nc_rxmac *rxmac[RTE_MAX_NC_RXMAC],
78 uint16_t max_rxmac)
79 {
80 uint16_t i;
81 for (i = 0; i < max_rxmac; i++) {
82 nc_rxmac_close(rxmac[i]);
83 rxmac[i] = NULL;
84 }
85 }
86
87 /**
88 * Close all TX DMA queues
89 *
90 * @param txmac
91 * Pointer to array of nc_txmac
92 * @param max_txmac
93 * Maximum index of txmac
94 */
95 static void
nfb_nc_txmac_deinit(struct nc_txmac * txmac[RTE_MAX_NC_TXMAC],uint16_t max_txmac)96 nfb_nc_txmac_deinit(struct nc_txmac *txmac[RTE_MAX_NC_TXMAC],
97 uint16_t max_txmac)
98 {
99 uint16_t i;
100 for (i = 0; i < max_txmac; i++) {
101 nc_txmac_close(txmac[i]);
102 txmac[i] = NULL;
103 }
104 }
105
106 /**
107 * DPDK callback to start the device.
108 *
109 * Start device by starting all configured queues.
110 *
111 * @param dev
112 * Pointer to Ethernet device structure.
113 *
114 * @return
115 * 0 on success, a negative errno value otherwise.
116 */
117 static int
nfb_eth_dev_start(struct rte_eth_dev * dev)118 nfb_eth_dev_start(struct rte_eth_dev *dev)
119 {
120 int ret;
121 uint16_t i;
122 uint16_t nb_rx = dev->data->nb_rx_queues;
123 uint16_t nb_tx = dev->data->nb_tx_queues;
124
125 for (i = 0; i < nb_rx; i++) {
126 ret = nfb_eth_rx_queue_start(dev, i);
127 if (ret != 0)
128 goto err_rx;
129 }
130
131 for (i = 0; i < nb_tx; i++) {
132 ret = nfb_eth_tx_queue_start(dev, i);
133 if (ret != 0)
134 goto err_tx;
135 }
136
137 return 0;
138
139 err_tx:
140 for (i = 0; i < nb_tx; i++)
141 nfb_eth_tx_queue_stop(dev, i);
142 err_rx:
143 for (i = 0; i < nb_rx; i++)
144 nfb_eth_rx_queue_stop(dev, i);
145 return ret;
146 }
147
148 /**
149 * DPDK callback to stop the device.
150 *
151 * Stop device by stopping all configured queues.
152 *
153 * @param dev
154 * Pointer to Ethernet device structure.
155 */
156 static int
nfb_eth_dev_stop(struct rte_eth_dev * dev)157 nfb_eth_dev_stop(struct rte_eth_dev *dev)
158 {
159 uint16_t i;
160 uint16_t nb_rx = dev->data->nb_rx_queues;
161 uint16_t nb_tx = dev->data->nb_tx_queues;
162
163 dev->data->dev_started = 0;
164
165 for (i = 0; i < nb_tx; i++)
166 nfb_eth_tx_queue_stop(dev, i);
167
168 for (i = 0; i < nb_rx; i++)
169 nfb_eth_rx_queue_stop(dev, i);
170
171 return 0;
172 }
173
174 /**
175 * DPDK callback for Ethernet device configuration.
176 *
177 * @param dev
178 * Pointer to Ethernet device structure.
179 *
180 * @return
181 * 0 on success, a negative errno value otherwise.
182 */
183 static int
nfb_eth_dev_configure(struct rte_eth_dev * dev __rte_unused)184 nfb_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
185 {
186 int ret;
187 struct pmd_internals *internals = dev->data->dev_private;
188 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
189
190 if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
191 ret = rte_mbuf_dyn_rx_timestamp_register
192 (&nfb_timestamp_dynfield_offset,
193 &nfb_timestamp_rx_dynflag);
194 if (ret != 0) {
195 NFB_LOG(ERR, "Cannot register Rx timestamp field/flag %d", ret);
196 nfb_close(internals->nfb);
197 return -rte_errno;
198 }
199 }
200
201 return 0;
202 }
203
204 static uint32_t
nfb_eth_get_max_mac_address_count(struct rte_eth_dev * dev)205 nfb_eth_get_max_mac_address_count(struct rte_eth_dev *dev)
206 {
207 uint16_t i;
208 uint32_t c;
209 uint32_t ret = (uint32_t)-1;
210 struct pmd_internals *internals = dev->data->dev_private;
211
212 /*
213 * Go through all RX MAC components in firmware and find
214 * the minimal indicated space size for MAC addresses.
215 */
216 for (i = 0; i < internals->max_rxmac; i++) {
217 c = nc_rxmac_mac_address_count(internals->rxmac[i]);
218 ret = RTE_MIN(c, ret);
219 }
220
221 /* The driver must support at least 1 MAC address, pretend that */
222 if (internals->max_rxmac == 0 || ret == 0)
223 ret = 1;
224
225 return ret;
226 }
227
228 /**
229 * DPDK callback to get information about the device.
230 *
231 * @param dev
232 * Pointer to Ethernet device structure.
233 * @param[out] info
234 * Info structure output buffer.
235 */
236 static int
nfb_eth_dev_info(struct rte_eth_dev * dev,struct rte_eth_dev_info * dev_info)237 nfb_eth_dev_info(struct rte_eth_dev *dev,
238 struct rte_eth_dev_info *dev_info)
239 {
240 dev_info->max_mac_addrs = nfb_eth_get_max_mac_address_count(dev);
241
242 dev_info->max_rx_pktlen = (uint32_t)-1;
243 dev_info->max_rx_queues = dev->data->nb_rx_queues;
244 dev_info->max_tx_queues = dev->data->nb_tx_queues;
245 dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G;
246 dev_info->rx_offload_capa =
247 RTE_ETH_RX_OFFLOAD_TIMESTAMP;
248
249 return 0;
250 }
251
252 /**
253 * DPDK callback to close the device.
254 *
255 * Destroy all queues and objects, free memory.
256 *
257 * @param dev
258 * Pointer to Ethernet device structure.
259 */
260 static int
nfb_eth_dev_close(struct rte_eth_dev * dev)261 nfb_eth_dev_close(struct rte_eth_dev *dev)
262 {
263 struct pmd_internals *internals = dev->data->dev_private;
264 uint16_t i;
265 uint16_t nb_rx = dev->data->nb_rx_queues;
266 uint16_t nb_tx = dev->data->nb_tx_queues;
267 int ret;
268
269 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
270 return 0;
271
272 ret = nfb_eth_dev_stop(dev);
273
274 nfb_nc_rxmac_deinit(internals->rxmac, internals->max_rxmac);
275 nfb_nc_txmac_deinit(internals->txmac, internals->max_txmac);
276
277 for (i = 0; i < nb_rx; i++) {
278 nfb_eth_rx_queue_release(dev, i);
279 dev->data->rx_queues[i] = NULL;
280 }
281 dev->data->nb_rx_queues = 0;
282 for (i = 0; i < nb_tx; i++) {
283 nfb_eth_tx_queue_release(dev, i);
284 dev->data->tx_queues[i] = NULL;
285 }
286 dev->data->nb_tx_queues = 0;
287
288 return ret;
289 }
290
291 /**
292 * DPDK callback to retrieve physical link information.
293 *
294 * @param dev
295 * Pointer to Ethernet device structure.
296 * @param[out] link
297 * Storage for current link status.
298 *
299 * @return
300 * 0 on success, a negative errno value otherwise.
301 */
302 static int
nfb_eth_link_update(struct rte_eth_dev * dev,int wait_to_complete __rte_unused)303 nfb_eth_link_update(struct rte_eth_dev *dev,
304 int wait_to_complete __rte_unused)
305 {
306 uint16_t i;
307 struct nc_rxmac_status status;
308 struct rte_eth_link link;
309 memset(&link, 0, sizeof(link));
310
311 struct pmd_internals *internals = dev->data->dev_private;
312
313 status.speed = MAC_SPEED_UNKNOWN;
314
315 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
316 link.link_status = RTE_ETH_LINK_DOWN;
317 link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
318 link.link_autoneg = RTE_ETH_LINK_SPEED_FIXED;
319
320 if (internals->rxmac[0] != NULL) {
321 nc_rxmac_read_status(internals->rxmac[0], &status);
322
323 switch (status.speed) {
324 case MAC_SPEED_10G:
325 link.link_speed = RTE_ETH_SPEED_NUM_10G;
326 break;
327 case MAC_SPEED_40G:
328 link.link_speed = RTE_ETH_SPEED_NUM_40G;
329 break;
330 case MAC_SPEED_100G:
331 link.link_speed = RTE_ETH_SPEED_NUM_100G;
332 break;
333 default:
334 link.link_speed = RTE_ETH_SPEED_NUM_NONE;
335 break;
336 }
337 }
338
339 for (i = 0; i < internals->max_rxmac; ++i) {
340 nc_rxmac_read_status(internals->rxmac[i], &status);
341
342 if (status.enabled && status.link_up) {
343 link.link_status = RTE_ETH_LINK_UP;
344 break;
345 }
346 }
347
348 rte_eth_linkstatus_set(dev, &link);
349
350 return 0;
351 }
352
353 /**
354 * DPDK callback to bring the link UP.
355 *
356 * @param dev
357 * Pointer to Ethernet device structure.
358 *
359 * @return
360 * 0 on success, a negative errno value otherwise.
361 */
362 static int
nfb_eth_dev_set_link_up(struct rte_eth_dev * dev)363 nfb_eth_dev_set_link_up(struct rte_eth_dev *dev)
364 {
365 struct pmd_internals *internals = (struct pmd_internals *)
366 dev->data->dev_private;
367
368 uint16_t i;
369 for (i = 0; i < internals->max_rxmac; ++i)
370 nc_rxmac_enable(internals->rxmac[i]);
371
372 for (i = 0; i < internals->max_txmac; ++i)
373 nc_txmac_enable(internals->txmac[i]);
374
375 return 0;
376 }
377
378 /**
379 * DPDK callback to bring the link DOWN.
380 *
381 * @param dev
382 * Pointer to Ethernet device structure.
383 *
384 * @return
385 * 0 on success, a negative errno value otherwise.
386 */
387 static int
nfb_eth_dev_set_link_down(struct rte_eth_dev * dev)388 nfb_eth_dev_set_link_down(struct rte_eth_dev *dev)
389 {
390 struct pmd_internals *internals = (struct pmd_internals *)
391 dev->data->dev_private;
392
393 uint16_t i;
394 for (i = 0; i < internals->max_rxmac; ++i)
395 nc_rxmac_disable(internals->rxmac[i]);
396
397 for (i = 0; i < internals->max_txmac; ++i)
398 nc_txmac_disable(internals->txmac[i]);
399
400 return 0;
401 }
402
403 static uint64_t
nfb_eth_mac_addr_conv(struct rte_ether_addr * mac_addr)404 nfb_eth_mac_addr_conv(struct rte_ether_addr *mac_addr)
405 {
406 int i;
407 uint64_t res = 0;
408 for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
409 res <<= 8;
410 res |= mac_addr->addr_bytes[i] & 0xFF;
411 }
412 return res;
413 }
414
415 /**
416 * DPDK callback to set primary MAC address.
417 *
418 * @param dev
419 * Pointer to Ethernet device structure.
420 * @param mac_addr
421 * MAC address to register.
422 *
423 * @return
424 * 0 on success, a negative errno value otherwise.
425 */
426 static int
nfb_eth_mac_addr_set(struct rte_eth_dev * dev,struct rte_ether_addr * mac_addr)427 nfb_eth_mac_addr_set(struct rte_eth_dev *dev,
428 struct rte_ether_addr *mac_addr)
429 {
430 unsigned int i;
431 uint64_t mac;
432 struct rte_eth_dev_data *data = dev->data;
433 struct pmd_internals *internals = (struct pmd_internals *)
434 data->dev_private;
435
436 mac = nfb_eth_mac_addr_conv(mac_addr);
437 /* Until no real multi-port support, configure all RX MACs the same */
438 for (i = 0; i < internals->max_rxmac; ++i)
439 nc_rxmac_set_mac(internals->rxmac[i], 0, mac, 1);
440
441 return 0;
442 }
443
444 static int
nfb_eth_mac_addr_add(struct rte_eth_dev * dev,struct rte_ether_addr * mac_addr,uint32_t index,uint32_t pool __rte_unused)445 nfb_eth_mac_addr_add(struct rte_eth_dev *dev,
446 struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool __rte_unused)
447 {
448 unsigned int i;
449 uint64_t mac;
450 struct rte_eth_dev_data *data = dev->data;
451 struct pmd_internals *internals = (struct pmd_internals *)
452 data->dev_private;
453
454 mac = nfb_eth_mac_addr_conv(mac_addr);
455 for (i = 0; i < internals->max_rxmac; ++i)
456 nc_rxmac_set_mac(internals->rxmac[i], index, mac, 1);
457
458 return 0;
459 }
460
461 static void
nfb_eth_mac_addr_remove(struct rte_eth_dev * dev,uint32_t index)462 nfb_eth_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
463 {
464 unsigned int i;
465 struct rte_eth_dev_data *data = dev->data;
466 struct pmd_internals *internals = (struct pmd_internals *)
467 data->dev_private;
468
469 for (i = 0; i < internals->max_rxmac; ++i)
470 nc_rxmac_set_mac(internals->rxmac[i], index, 0, 0);
471 }
472
/* Ethernet device operation table registered in nfb_eth_dev_init(). */
static const struct eth_dev_ops ops = {
	/* Device lifecycle */
	.dev_start = nfb_eth_dev_start,
	.dev_stop = nfb_eth_dev_stop,
	.dev_set_link_up = nfb_eth_dev_set_link_up,
	.dev_set_link_down = nfb_eth_dev_set_link_down,
	.dev_close = nfb_eth_dev_close,
	.dev_configure = nfb_eth_dev_configure,
	.dev_infos_get = nfb_eth_dev_info,
	/* RX mode (implemented in nfb_rxmode.c) */
	.promiscuous_enable = nfb_eth_promiscuous_enable,
	.promiscuous_disable = nfb_eth_promiscuous_disable,
	.allmulticast_enable = nfb_eth_allmulticast_enable,
	.allmulticast_disable = nfb_eth_allmulticast_disable,
	/* Per-queue control (implemented in nfb_rx.c / nfb_tx.c) */
	.rx_queue_start = nfb_eth_rx_queue_start,
	.rx_queue_stop = nfb_eth_rx_queue_stop,
	.tx_queue_start = nfb_eth_tx_queue_start,
	.tx_queue_stop = nfb_eth_tx_queue_stop,
	.rx_queue_setup = nfb_eth_rx_queue_setup,
	.tx_queue_setup = nfb_eth_tx_queue_setup,
	.rx_queue_release = nfb_eth_rx_queue_release,
	.tx_queue_release = nfb_eth_tx_queue_release,
	/* Link and statistics */
	.link_update = nfb_eth_link_update,
	.stats_get = nfb_eth_stats_get,
	.stats_reset = nfb_eth_stats_reset,
	/* MAC address management */
	.mac_addr_set = nfb_eth_mac_addr_set,
	.mac_addr_add = nfb_eth_mac_addr_add,
	.mac_addr_remove = nfb_eth_mac_addr_remove,
};
500
501 /**
502 * DPDK callback to initialize an ethernet device
503 *
504 * @param dev
505 * Pointer to ethernet device structure
506 *
507 * @return
508 * 0 on success, a negative errno value otherwise.
509 */
510 static int
nfb_eth_dev_init(struct rte_eth_dev * dev)511 nfb_eth_dev_init(struct rte_eth_dev *dev)
512 {
513 uint32_t mac_count;
514 struct rte_eth_dev_data *data = dev->data;
515 struct pmd_internals *internals = (struct pmd_internals *)
516 data->dev_private;
517 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
518 struct rte_pci_addr *pci_addr = &pci_dev->addr;
519 struct rte_ether_addr eth_addr_init;
520 char nfb_dev[PATH_MAX];
521
522 NFB_LOG(INFO, "Initializing NFB device (" PCI_PRI_FMT ")",
523 pci_addr->domain, pci_addr->bus, pci_addr->devid,
524 pci_addr->function);
525
526 snprintf(nfb_dev, sizeof(nfb_dev),
527 "/dev/nfb/by-pci-slot/" PCI_PRI_FMT,
528 pci_addr->domain, pci_addr->bus, pci_addr->devid,
529 pci_addr->function);
530
531 /*
532 * Get number of available DMA RX and TX queues, which is maximum
533 * number of queues that can be created and store it in private device
534 * data structure.
535 */
536 internals->nfb = nfb_open(nfb_dev);
537 if (internals->nfb == NULL) {
538 NFB_LOG(ERR, "nfb_open(): failed to open %s", nfb_dev);
539 return -EINVAL;
540 }
541 data->nb_rx_queues = ndp_get_rx_queue_available_count(internals->nfb);
542 data->nb_tx_queues = ndp_get_tx_queue_available_count(internals->nfb);
543
544 NFB_LOG(INFO, "Available NDP queues RX: %u TX: %u",
545 data->nb_rx_queues, data->nb_tx_queues);
546
547 nfb_nc_rxmac_init(internals->nfb,
548 internals->rxmac,
549 &internals->max_rxmac);
550 nfb_nc_txmac_init(internals->nfb,
551 internals->txmac,
552 &internals->max_txmac);
553
554 /* Set rx, tx burst functions */
555 dev->rx_pkt_burst = nfb_eth_ndp_rx;
556 dev->tx_pkt_burst = nfb_eth_ndp_tx;
557
558 /* Set function callbacks for Ethernet API */
559 dev->dev_ops = &ops;
560
561 /* Get link state */
562 nfb_eth_link_update(dev, 0);
563
564 /* Allocate space for MAC addresses */
565 mac_count = nfb_eth_get_max_mac_address_count(dev);
566 data->mac_addrs = rte_zmalloc(data->name,
567 sizeof(struct rte_ether_addr) * mac_count, RTE_CACHE_LINE_SIZE);
568 if (data->mac_addrs == NULL) {
569 NFB_LOG(ERR, "Could not alloc space for MAC address");
570 nfb_close(internals->nfb);
571 return -EINVAL;
572 }
573
574 rte_eth_random_addr(eth_addr_init.addr_bytes);
575 eth_addr_init.addr_bytes[0] = eth_addr.addr_bytes[0];
576 eth_addr_init.addr_bytes[1] = eth_addr.addr_bytes[1];
577 eth_addr_init.addr_bytes[2] = eth_addr.addr_bytes[2];
578
579 nfb_eth_mac_addr_set(dev, ð_addr_init);
580 rte_ether_addr_copy(ð_addr_init, &dev->data->mac_addrs[0]);
581
582 data->promiscuous = nfb_eth_promiscuous_get(dev);
583 data->all_multicast = nfb_eth_allmulticast_get(dev);
584
585 dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
586
587 NFB_LOG(INFO, "NFB device (" PCI_PRI_FMT ") successfully initialized",
588 pci_addr->domain, pci_addr->bus, pci_addr->devid,
589 pci_addr->function);
590
591 return 0;
592 }
593
594 /**
595 * DPDK callback to uninitialize an ethernet device
596 *
597 * @param dev
598 * Pointer to ethernet device structure
599 *
600 * @return
601 * 0 on success, a negative errno value otherwise.
602 */
603 static int
nfb_eth_dev_uninit(struct rte_eth_dev * dev)604 nfb_eth_dev_uninit(struct rte_eth_dev *dev)
605 {
606 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
607 struct rte_pci_addr *pci_addr = &pci_dev->addr;
608
609 nfb_eth_dev_close(dev);
610
611 NFB_LOG(INFO, "NFB device (" PCI_PRI_FMT ") successfully uninitialized",
612 pci_addr->domain, pci_addr->bus, pci_addr->devid,
613 pci_addr->function);
614
615 return 0;
616 }
617
/* PCI IDs handled by this driver; the list is zero-terminated. */
static const struct rte_pci_id nfb_pci_id_table[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE, PCI_DEVICE_ID_NFB_40G2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE, PCI_DEVICE_ID_NFB_100G2) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE, PCI_DEVICE_ID_NFB_200G2QL) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_SILICOM, PCI_DEVICE_ID_FB2CGG3) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_SILICOM, PCI_DEVICE_ID_FB2CGG3D) },
	{ .vendor_id = 0, }
};
626
627 /**
628 * DPDK callback to register a PCI device.
629 *
630 * This function spawns Ethernet devices out of a given PCI device.
631 *
632 * @param[in] pci_drv
633 * PCI driver structure (nfb_driver).
634 * @param[in] pci_dev
635 * PCI device information.
636 *
637 * @return
638 * 0 on success, a negative errno value otherwise.
639 */
640 static int
nfb_eth_pci_probe(struct rte_pci_driver * pci_drv __rte_unused,struct rte_pci_device * pci_dev)641 nfb_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
642 struct rte_pci_device *pci_dev)
643 {
644 return rte_eth_dev_pci_generic_probe(pci_dev,
645 sizeof(struct pmd_internals), nfb_eth_dev_init);
646 }
647
648 /**
649 * DPDK callback to remove a PCI device.
650 *
651 * This function removes all Ethernet devices belong to a given PCI device.
652 *
653 * @param[in] pci_dev
654 * Pointer to the PCI device.
655 *
656 * @return
657 * 0 on success, the function cannot fail.
658 */
659 static int
nfb_eth_pci_remove(struct rte_pci_device * pci_dev)660 nfb_eth_pci_remove(struct rte_pci_device *pci_dev)
661 {
662 return rte_eth_dev_pci_generic_remove(pci_dev, nfb_eth_dev_uninit);
663 }
664
/* PCI driver descriptor tying the ID table to the probe/remove hooks. */
static struct rte_pci_driver nfb_eth_driver = {
	.id_table = nfb_pci_id_table,
	.probe = nfb_eth_pci_probe,
	.remove = nfb_eth_pci_remove,
};

/* Driver registration: PCI driver, its ID table, the required kernel
 * module ("nfb"), and the driver's default log level. */
RTE_PMD_REGISTER_PCI(RTE_NFB_DRIVER_NAME, nfb_eth_driver);
RTE_PMD_REGISTER_PCI_TABLE(RTE_NFB_DRIVER_NAME, nfb_pci_id_table);
RTE_PMD_REGISTER_KMOD_DEP(RTE_NFB_DRIVER_NAME, "* nfb");
RTE_LOG_REGISTER_DEFAULT(nfb_logtype, NOTICE);
675