/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_ring.h>

#include "virtual_pmd.h"
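/*
 * A minimal PMD used by the unit tests: RX and TX are backed by software
 * rings rather than hardware, so a test can inject packets into a port and
 * inspect what the port "transmits". A rough usage sketch, assuming the
 * usual configure/queue-setup/start sequence has been run on the port and
 * that mac, pkts, rx_pkts and nb_pkts are set up by the caller:
 *
 *	int port = virtual_ethdev_create("virt_eth0", &mac,
 *			rte_socket_id(), 0);
 *	...
 *	virtual_ethdev_add_mbufs_to_rx_queue(port, pkts, nb_pkts);
 *	rte_eth_rx_burst(port, 0, rx_pkts, MAX_PKT_BURST);
 *
 * Packets sent to the port with rte_eth_tx_burst() land in the TX ring and
 * can be read back with virtual_ethdev_get_mbufs_from_tx_queue().
 */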
#define MAX_PKT_BURST 512

static const char *virtual_ethdev_driver_name = "Virtual PMD";

/* Per-device private data: mbufs "received" by the device are dequeued from
 * rx_queue and mbufs "transmitted" by it are enqueued onto tx_queue. */
struct virtual_ethdev_private {
	struct eth_dev_ops dev_ops;
	struct rte_eth_stats eth_stats;

	struct rte_ring *rx_queue;
	struct rte_ring *tx_queue;

	int tx_burst_fail_count;
};

struct virtual_ethdev_queue {
	int port_id;
	int queue_id;
};

static int
virtual_ethdev_start_success(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 1;

	return 0;
}

static int
virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 0;

	return -1;
}

static void
virtual_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	void *pkt = NULL;
	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_started = 0;

	/* Drop any mbufs still held in the software queues */
	while (rte_ring_dequeue(prv->rx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	while (rte_ring_dequeue(prv->tx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);
}

static void
virtual_ethdev_close(struct rte_eth_dev *dev __rte_unused)
{}

static int
virtual_ethdev_configure_success(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_configure_fail(struct rte_eth_dev *dev __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
		struct rte_eth_dev_info *dev_info)
{
	dev_info->driver_name = virtual_ethdev_driver_name;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
}

static int
virtual_ethdev_rx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t rx_queue_id, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	struct virtual_ethdev_queue *rx_q;

	rx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (rx_q == NULL)
		return -1;

	rx_q->port_id = dev->data->port_id;
	rx_q->queue_id = rx_queue_id;

	dev->data->rx_queues[rx_queue_id] = rx_q;

	return 0;
}

static int
virtual_ethdev_rx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t rx_queue_id __rte_unused, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_tx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t tx_queue_id, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct virtual_ethdev_queue *tx_q;

	tx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (tx_q == NULL)
		return -1;

	tx_q->port_id = dev->data->port_id;
	tx_q->queue_id = tx_queue_id;

	dev->data->tx_queues[tx_queue_id] = tx_q;

	return 0;
}

static int
virtual_ethdev_tx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t tx_queue_id __rte_unused, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_rx_queue_release(void *q __rte_unused)
{
}

static void
virtual_ethdev_tx_queue_release(void *q __rte_unused)
{
}

static int
virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete __rte_unused)
{
	if (!bonded_eth_dev->data->dev_started)
		bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	return 0;
}

static int
virtual_ethdev_link_update_fail(struct rte_eth_dev *bonded_eth_dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;

	if (stats)
		rte_memcpy(stats, &dev_private->eth_stats, sizeof(*stats));
}

static void
virtual_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	void *pkt = NULL;

	/* Free any mbufs still held in the TX queue. rte_ring_dequeue()
	 * returns 0 on success and -ENOENT once the ring is empty, so loop
	 * until the ring drains (the original -ENOBUFS test never matched
	 * and the loop body never ran). */
	while (rte_ring_dequeue(dev_private->tx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	/* Reset internal statistics */
	memset(&dev_private->eth_stats, 0, sizeof(dev_private->eth_stats));
}

static void
virtual_ethdev_promiscuous_mode_enable(struct rte_eth_dev *dev __rte_unused)
{}

static void
virtual_ethdev_promiscuous_mode_disable(struct rte_eth_dev *dev __rte_unused)
{}
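/* Default callback set installed on every virtual ethdev. Individual
 * entries are swapped out at run time by the *_fn_set_success() helpers
 * below to simulate control-path failures. */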
static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {
	.dev_configure = virtual_ethdev_configure_success,
	.dev_start = virtual_ethdev_start_success,
	.dev_stop = virtual_ethdev_stop,
	.dev_close = virtual_ethdev_close,
	.dev_infos_get = virtual_ethdev_info_get,
	.rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
	.tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
	.rx_queue_release = virtual_ethdev_rx_queue_release,
	.tx_queue_release = virtual_ethdev_tx_queue_release,
	.link_update = virtual_ethdev_link_update_success,
	.stats_get = virtual_ethdev_stats_get,
	.stats_reset = virtual_ethdev_stats_reset,
	.promiscuous_enable = virtual_ethdev_promiscuous_mode_enable,
	.promiscuous_disable = virtual_ethdev_promiscuous_mode_disable
};

void
virtual_ethdev_start_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->dev_start = virtual_ethdev_start_success;
	else
		dev_ops->dev_start = virtual_ethdev_start_fail;
}

void
virtual_ethdev_configure_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->dev_configure = virtual_ethdev_configure_success;
	else
		dev_ops->dev_configure = virtual_ethdev_configure_fail;
}

void
virtual_ethdev_rx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_success;
	else
		dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_fail;
}

void
virtual_ethdev_tx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_success;
	else
		dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_fail;
}

void
virtual_ethdev_link_update_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->link_update = virtual_ethdev_link_update_success;
	else
		dev_ops->link_update = virtual_ethdev_link_update_fail;
}
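/* A minimal sketch of how a test might use these helpers (port_id and
 * port_conf are illustrative names supplied by the caller, not defined in
 * this file):
 *
 *	virtual_ethdev_configure_fn_set_success(port_id, 0);
 *	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	// ret is now negative, exercising the caller's error path
 *
 * Passing a non-zero success value restores the default behaviour. */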
static uint16_t
virtual_ethdev_rx_burst_success(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_queue *pq_map;
	struct virtual_ethdev_private *dev_private;

	int rx_count, i;

	pq_map = (struct virtual_ethdev_queue *)queue;
	vrtl_eth_dev = &rte_eth_devices[pq_map->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	rx_count = rte_ring_dequeue_burst(dev_private->rx_queue, (void **)bufs,
			nb_pkts);

	/* increment ipackets count */
	dev_private->eth_stats.ipackets += rx_count;

	/* increment ibytes count */
	for (i = 0; i < rx_count; i++)
		dev_private->eth_stats.ibytes += rte_pktmbuf_pkt_len(bufs[i]);

	return rx_count;
}

static uint16_t
virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
		struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

static uint16_t
virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct virtual_ethdev_queue *tx_q = (struct virtual_ethdev_queue *)queue;

	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_private *dev_private;

	int i;

	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	/* Nothing can be transmitted while the (virtual) link is down */
	if (!vrtl_eth_dev->data->dev_link.link_status)
		nb_pkts = 0;
	else
		nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue,
				(void **)bufs, nb_pkts);

	/* increment opackets count */
	dev_private->eth_stats.opackets += nb_pkts;

	/* increment obytes count */
	for (i = 0; i < nb_pkts; i++)
		dev_private->eth_stats.obytes += rte_pktmbuf_pkt_len(bufs[i]);

	return nb_pkts;
}

static uint16_t
virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev = NULL;
	struct virtual_ethdev_queue *tx_q = NULL;
	struct virtual_ethdev_private *dev_private = NULL;

	int i;

	tx_q = (struct virtual_ethdev_queue *)queue;
	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	if (dev_private->tx_burst_fail_count < nb_pkts) {
		int successfully_txd = nb_pkts - dev_private->tx_burst_fail_count;

		/* increment opackets count */
		dev_private->eth_stats.opackets += successfully_txd;

		/* free the "transmitted" packets in the burst */
		for (i = 0; i < successfully_txd; i++) {
			if (bufs[i] != NULL)
				rte_pktmbuf_free(bufs[i]);

			bufs[i] = NULL;
		}

		return successfully_txd;
	}

	return 0;
}

void
virtual_ethdev_rx_burst_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	else
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_fail;
}

void
virtual_ethdev_tx_burst_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;

	if (success)
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
	else
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_fail;

	dev_private->tx_burst_fail_count = 0;
}

void
virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint8_t port_id,
		uint8_t packet_fail_count)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	dev_private->tx_burst_fail_count = packet_fail_count;
}
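/* Illustrative use of the fail-count hook (port_id is supplied by the
 * caller): after
 *
 *	virtual_ethdev_tx_burst_fn_set_success(port_id, 0);
 *	virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(port_id, 2);
 *
 * a subsequent rte_eth_tx_burst() of 5 mbufs reports 3 as transmitted,
 * frees those 3 and leaves the remaining 2 owned by the caller. */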
void
virtual_ethdev_set_link_status(uint8_t port_id, uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;
}

void
virtual_ethdev_simulate_link_status_interrupt(uint8_t port_id,
		uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;

	/* Fire the link-state-change callbacks registered on this port */
	_rte_eth_dev_callback_process(vrtl_eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

int
virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private =
			vrtl_eth_dev->data->dev_private;

	return rte_ring_enqueue_burst(dev_private->rx_queue, (void **)pkt_burst,
			burst_length);
}

int
virtual_ethdev_get_mbufs_from_tx_queue(uint8_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct virtual_ethdev_private *dev_private;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst,
			burst_length);
}

static uint8_t
get_number_of_sockets(void)
{
	int sockets = 0;
	int i;
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();

	for (i = 0; i < RTE_MAX_MEMSEG && ms[i].addr != NULL; i++) {
		if (sockets < ms[i].socket_id)
			sockets = ms[i].socket_id;
	}

	/* Number of sockets = maximum socket_id + 1 */
	return ++sockets;
}
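/* Allocate and register a virtual ethdev named `name`, backed by a dummy
 * PCI device/driver pair so it looks like an ordinary port to the test
 * code. Returns the new port id on success or -1 on failure. */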
int
virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
		uint8_t socket_id, uint8_t isr_support)
{
	struct rte_pci_device *pci_dev = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct eth_driver *eth_drv = NULL;
	struct rte_pci_driver *pci_drv = NULL;
	struct rte_pci_id *id_table = NULL;
	struct virtual_ethdev_private *dev_private = NULL;
	char name_buf[RTE_RING_NAMESIZE];

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (dev_private) data
	 */

	if (socket_id >= get_number_of_sockets())
		goto err;

	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
	if (pci_dev == NULL)
		goto err;

	eth_drv = rte_zmalloc_socket(name, sizeof(*eth_drv), 0, socket_id);
	if (eth_drv == NULL)
		goto err;

	pci_drv = rte_zmalloc_socket(name, sizeof(*pci_drv), 0, socket_id);
	if (pci_drv == NULL)
		goto err;

	id_table = rte_zmalloc_socket(name, sizeof(*id_table), 0, socket_id);
	if (id_table == NULL)
		goto err;
	id_table->device_id = 0xBEEF;

	dev_private = rte_zmalloc_socket(name, sizeof(*dev_private), 0, socket_id);
	if (dev_private == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_rxQ", name);
	dev_private->rx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id,
			0);
	if (dev_private->rx_queue == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_txQ", name);
	dev_private->tx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id,
			0);
	if (dev_private->tx_queue == NULL)
		goto err;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto err;

	pci_dev->device.numa_node = socket_id;
	pci_drv->driver.name = virtual_ethdev_driver_name;
	pci_drv->id_table = id_table;

	if (isr_support)
		pci_drv->drv_flags |= RTE_PCI_DRV_INTR_LSC;
	else
		pci_drv->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;

	eth_drv->pci_drv = (struct rte_pci_driver)(*pci_drv);
	eth_dev->driver = eth_drv;

	eth_dev->data->nb_rx_queues = (uint16_t)1;
	eth_dev->data->nb_tx_queues = (uint16_t)1;

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

	eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL)
		goto err;

	memcpy(eth_dev->data->mac_addrs, mac_addr,
			sizeof(*eth_dev->data->mac_addrs));

	eth_dev->data->dev_started = 0;
	eth_dev->data->promiscuous = 0;
	eth_dev->data->scattered_rx = 0;
	eth_dev->data->all_multicast = 0;

	eth_dev->data->dev_private = dev_private;

	/* Copy default device operation functions */
	dev_private->dev_ops = virtual_ethdev_default_dev_ops;
	eth_dev->dev_ops = &dev_private->dev_ops;

	eth_dev->pci_dev = pci_dev;
	eth_dev->pci_dev->device.driver = &eth_drv->pci_drv.driver;

	eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;

	return eth_dev->data->port_id;

err:
	rte_free(pci_dev);
	rte_free(pci_drv);
	rte_free(eth_drv);
	rte_free(id_table);
	rte_free(dev_private);

	return -1;
}