/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_ring.h>

#include "virtual_pmd.h"

#define MAX_PKT_BURST 512

static const char *virtual_ethdev_driver_name = "Virtual PMD";

struct virtual_ethdev_private {
	struct eth_dev_ops dev_ops;
	struct rte_eth_stats eth_stats;

	struct rte_ring *rx_queue;
	struct rte_ring *tx_queue;

	int tx_burst_fail_count;
};

struct virtual_ethdev_queue {
	int port_id;
	int queue_id;
};

static int
virtual_ethdev_start_success(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 1;

	return 0;
}

static int
virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 0;

	return -1;
}

static void
virtual_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	void *pkt = NULL;
	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_started = 0;

	/* drain any mbufs still queued on the emulated wire */
	while (rte_ring_dequeue(prv->rx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	while (rte_ring_dequeue(prv->tx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);
}

static void
virtual_ethdev_close(struct rte_eth_dev *dev __rte_unused)
{}

static int
virtual_ethdev_configure_success(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_configure_fail(struct rte_eth_dev *dev __rte_unused)
{
	return -1;
}
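
/*
 * Report fixed capabilities. The values below are arbitrary but large
 * enough for the unit tests that drive this PMD.
 */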
static void
virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
		struct rte_eth_dev_info *dev_info)
{
	dev_info->driver_name = virtual_ethdev_driver_name;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;
}

static int
virtual_ethdev_rx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t rx_queue_id, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	struct virtual_ethdev_queue *rx_q;

	rx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (rx_q == NULL)
		return -1;

	rx_q->port_id = dev->data->port_id;
	rx_q->queue_id = rx_queue_id;

	dev->data->rx_queues[rx_queue_id] = rx_q;

	return 0;
}

static int
virtual_ethdev_rx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t rx_queue_id __rte_unused, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_tx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t tx_queue_id, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct virtual_ethdev_queue *tx_q;

	tx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (tx_q == NULL)
		return -1;

	tx_q->port_id = dev->data->port_id;
	tx_q->queue_id = tx_queue_id;

	dev->data->tx_queues[tx_queue_id] = tx_q;

	return 0;
}

static int
virtual_ethdev_tx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t tx_queue_id __rte_unused, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_rx_queue_release(void *q __rte_unused)
{
}

static void
virtual_ethdev_tx_queue_release(void *q __rte_unused)
{
}

static int
virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete __rte_unused)
{
	if (!bonded_eth_dev->data->dev_started)
		bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	return 0;
}

static int
virtual_ethdev_link_update_fail(struct rte_eth_dev *bonded_eth_dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;

	if (stats)
		rte_memcpy(stats, &dev_private->eth_stats, sizeof(*stats));
}

static void
virtual_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	void *pkt = NULL;

	/* free any mbufs still queued for TX so they are not leaked */
	while (rte_ring_dequeue(dev_private->tx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	/* Reset internal statistics */
	memset(&dev_private->eth_stats, 0, sizeof(dev_private->eth_stats));
}

static void
virtual_ethdev_promiscuous_mode_enable(struct rte_eth_dev *dev __rte_unused)
{}

static void
virtual_ethdev_promiscuous_mode_disable(struct rte_eth_dev *dev __rte_unused)
{}
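
/*
 * Default device operations. Every op starts out as its "success" variant;
 * tests swap in the corresponding "fail" variant at run time through the
 * virtual_ethdev_*_fn_set_success() helpers below. The table is copied into
 * each device's private data, so ports can be toggled independently.
 */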
static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {
	.dev_configure = virtual_ethdev_configure_success,
	.dev_start = virtual_ethdev_start_success,
	.dev_stop = virtual_ethdev_stop,
	.dev_close = virtual_ethdev_close,
	.dev_infos_get = virtual_ethdev_info_get,
	.rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
	.tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
	.rx_queue_release = virtual_ethdev_rx_queue_release,
	.tx_queue_release = virtual_ethdev_tx_queue_release,
	.link_update = virtual_ethdev_link_update_success,
	.stats_get = virtual_ethdev_stats_get,
	.stats_reset = virtual_ethdev_stats_reset,
	.promiscuous_enable = virtual_ethdev_promiscuous_mode_enable,
	.promiscuous_disable = virtual_ethdev_promiscuous_mode_disable
};

void
virtual_ethdev_start_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->dev_start = virtual_ethdev_start_success;
	else
		dev_ops->dev_start = virtual_ethdev_start_fail;
}

void
virtual_ethdev_configure_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->dev_configure = virtual_ethdev_configure_success;
	else
		dev_ops->dev_configure = virtual_ethdev_configure_fail;
}

void
virtual_ethdev_rx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_success;
	else
		dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_fail;
}

void
virtual_ethdev_tx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_success;
	else
		dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_fail;
}

void
virtual_ethdev_link_update_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->link_update = virtual_ethdev_link_update_success;
	else
		dev_ops->link_update = virtual_ethdev_link_update_fail;
}
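
/*
 * Burst functions. I/O is emulated with the two per-device rte_rings:
 * RX dequeues mbufs that a test injected with
 * virtual_ethdev_add_mbufs_to_rx_queue(), and TX enqueues mbufs that the
 * test later drains with virtual_ethdev_get_mbufs_from_tx_queue().
 */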
static uint16_t
virtual_ethdev_rx_burst_success(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_queue *pq_map;
	struct virtual_ethdev_private *dev_private;

	int rx_count, i;

	pq_map = (struct virtual_ethdev_queue *)queue;
	vrtl_eth_dev = &rte_eth_devices[pq_map->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	rx_count = rte_ring_dequeue_burst(dev_private->rx_queue, (void **)bufs,
			nb_pkts);

	/* increment ipackets count */
	dev_private->eth_stats.ipackets += rx_count;

	/* increment ibytes count */
	for (i = 0; i < rx_count; i++)
		dev_private->eth_stats.ibytes += rte_pktmbuf_pkt_len(bufs[i]);

	return rx_count;
}

static uint16_t
virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
		struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

static uint16_t
virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct virtual_ethdev_queue *tx_q = (struct virtual_ethdev_queue *)queue;

	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_private *dev_private;

	int i;

	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	/* when the link is down nothing is sent; otherwise only the mbufs
	 * that fit in the TX ring count as transmitted */
	if (!vrtl_eth_dev->data->dev_link.link_status)
		nb_pkts = 0;
	else
		nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue,
				(void **)bufs, nb_pkts);

	/* increment opackets count */
	dev_private->eth_stats.opackets += nb_pkts;

	/* increment obytes count */
	for (i = 0; i < nb_pkts; i++)
		dev_private->eth_stats.obytes += rte_pktmbuf_pkt_len(bufs[i]);

	return nb_pkts;
}

static uint16_t
virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev = NULL;
	struct virtual_ethdev_queue *tx_q = NULL;
	struct virtual_ethdev_private *dev_private = NULL;

	int i;

	tx_q = (struct virtual_ethdev_queue *)queue;
	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	if (dev_private->tx_burst_fail_count < nb_pkts) {
		int successfully_txd = nb_pkts - dev_private->tx_burst_fail_count;

		/* increment opackets count */
		dev_private->eth_stats.opackets += successfully_txd;

		/* free the packets that count as transmitted */
		for (i = 0; i < successfully_txd; i++) {
			if (bufs[i] != NULL)
				rte_pktmbuf_free(bufs[i]);

			bufs[i] = NULL;
		}

		return successfully_txd;
	}

	return 0;
}

void
virtual_ethdev_rx_burst_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	else
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_fail;
}

void
virtual_ethdev_tx_burst_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;

	if (success)
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
	else
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_fail;

	dev_private->tx_burst_fail_count = 0;
}

void
virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint8_t port_id,
		uint8_t packet_fail_count)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	dev_private->tx_burst_fail_count = packet_fail_count;
}
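
/*
 * Link-state helpers: virtual_ethdev_set_link_status() flips the link
 * silently, while virtual_ethdev_simulate_link_status_interrupt() also
 * fires the LSC callback so interrupt-driven paths can be exercised.
 */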
void
virtual_ethdev_set_link_status(uint8_t port_id, uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;
}

void
virtual_ethdev_simulate_link_status_interrupt(uint8_t port_id,
		uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;

	_rte_eth_dev_callback_process(vrtl_eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

int
virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private =
			vrtl_eth_dev->data->dev_private;

	return rte_ring_enqueue_burst(dev_private->rx_queue, (void **)pkt_burst,
			burst_length);
}

int
virtual_ethdev_get_mbufs_from_tx_queue(uint8_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct virtual_ethdev_private *dev_private;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst,
			burst_length);
}

static uint8_t
get_number_of_sockets(void)
{
	int sockets = 0;
	int i;
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();

	for (i = 0; i < RTE_MAX_MEMSEG && ms[i].addr != NULL; i++) {
		if (sockets < ms[i].socket_id)
			sockets = ms[i].socket_id;
	}

	/* Number of sockets = maximum socket_id + 1 */
	return ++sockets;
}
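
/*
 * Create a virtual ethdev on the given socket: allocate dummy PCI
 * scaffolding (device, driver, id table), the two rte_rings that emulate
 * the wire, and an ethdev entry. Returns the new port id, or -1 on
 * failure.
 */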
int
virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
		uint8_t socket_id, uint8_t isr_support)
{
	struct rte_pci_device *pci_dev = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct eth_driver *eth_drv = NULL;
	struct rte_pci_driver *pci_drv = NULL;
	struct rte_pci_id *id_table = NULL;
	struct virtual_ethdev_private *dev_private = NULL;
	char name_buf[RTE_RING_NAMESIZE];

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (dev_private) data
	 */

	if (socket_id >= get_number_of_sockets())
		goto err;

	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
	if (pci_dev == NULL)
		goto err;

	eth_drv = rte_zmalloc_socket(name, sizeof(*eth_drv), 0, socket_id);
	if (eth_drv == NULL)
		goto err;

	pci_drv = rte_zmalloc_socket(name, sizeof(*pci_drv), 0, socket_id);
	if (pci_drv == NULL)
		goto err;

	id_table = rte_zmalloc_socket(name, sizeof(*id_table), 0, socket_id);
	if (id_table == NULL)
		goto err;
	id_table->device_id = 0xBEEF;

	dev_private = rte_zmalloc_socket(name, sizeof(*dev_private), 0, socket_id);
	if (dev_private == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_rxQ", name);
	dev_private->rx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id,
			0);
	if (dev_private->rx_queue == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_txQ", name);
	dev_private->tx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id,
			0);
	if (dev_private->tx_queue == NULL)
		goto err;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto err;

	pci_dev->device.numa_node = socket_id;
	pci_drv->driver.name = virtual_ethdev_driver_name;
	pci_drv->id_table = id_table;

	if (isr_support)
		pci_drv->drv_flags |= RTE_PCI_DRV_INTR_LSC;
	else
		pci_drv->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;

	eth_drv->pci_drv = *pci_drv;
	eth_dev->driver = eth_drv;

	eth_dev->data->nb_rx_queues = (uint16_t)1;
	eth_dev->data->nb_tx_queues = (uint16_t)1;

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

	eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL)
		goto err;

	memcpy(eth_dev->data->mac_addrs, mac_addr,
			sizeof(*eth_dev->data->mac_addrs));

	eth_dev->data->dev_started = 0;
	eth_dev->data->promiscuous = 0;
	eth_dev->data->scattered_rx = 0;
	eth_dev->data->all_multicast = 0;

	eth_dev->data->dev_private = dev_private;

	/* Copy default device operation functions */
	dev_private->dev_ops = virtual_ethdev_default_dev_ops;
	eth_dev->dev_ops = &dev_private->dev_ops;

	pci_dev->device.driver = &eth_drv->pci_drv.driver;
	eth_dev->device = &pci_dev->device;

	eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;

	return eth_dev->data->port_id;

err:
	rte_free(pci_dev);
	rte_free(pci_drv);
	rte_free(eth_drv);
	rte_free(id_table);

	if (dev_private != NULL) {
		/* rte_ring_free() is a no-op on NULL, so rings that were
		 * never created are safe to pass */
		rte_ring_free(dev_private->rx_queue);
		rte_ring_free(dev_private->tx_queue);
	}
	rte_free(dev_private);

	return -1;
}
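
/*
 * Minimal usage sketch (illustrative only, not compiled): it assumes EAL is
 * already initialized, and the names default_pmd_conf, mbuf_pool and pkts
 * are placeholders supplied by the caller, not provided by this file.
 *
 *	struct ether_addr mac = { .addr_bytes = {0x00, 0xFF, 0x00, 0xFF, 0x00, 0x00} };
 *	int port = virtual_ethdev_create("virt_eth0", &mac, rte_socket_id(), 1);
 *
 *	rte_eth_dev_configure(port, 1, 1, &default_pmd_conf);
 *	rte_eth_rx_queue_setup(port, 0, 128, rte_socket_id(), NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(port, 0, 512, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port);
 *
 *	virtual_ethdev_simulate_link_status_interrupt(port, 1);
 *	virtual_ethdev_add_mbufs_to_rx_queue(port, pkts, nb_pkts);
 */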