/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_ring.h>

#include "virtual_pmd.h"

#define MAX_PKT_BURST 512

static const char *virtual_ethdev_driver_name = "Virtual PMD";

/*
 * Per-device private data: a pair of rte_rings stands in for the wire.
 * Packets "received" by the device are dequeued from rx_queue and packets
 * "transmitted" are enqueued on tx_queue, where a test can inspect them.
 */
struct virtual_ethdev_private {
	struct rte_eth_stats eth_stats;

	struct rte_ring *rx_queue;
	struct rte_ring *tx_queue;

	int tx_burst_fail_count;
};

struct virtual_ethdev_queue {
	int port_id;
	int queue_id;
};

static int
virtual_ethdev_start_success(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 1;

	return 0;
}

static int
virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 0;

	return -1;
}

static void
virtual_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	struct rte_mbuf *pkt = NULL;
	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_started = 0;

	/* Flush any packets still queued on the emulated wire */
	while (rte_ring_dequeue(prv->rx_queue, (void **)&pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	while (rte_ring_dequeue(prv->tx_queue, (void **)&pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);
}

static void
virtual_ethdev_close(struct rte_eth_dev *dev __rte_unused)
{}

static int
virtual_ethdev_configure_success(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_configure_fail(struct rte_eth_dev *dev __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
		struct rte_eth_dev_info *dev_info)
{
	dev_info->driver_name = virtual_ethdev_driver_name;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
}
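/*
 * Illustrative only (not compiled into the driver; "port_id", "dev_conf"
 * and "mp" are placeholders supplied by the test): the queue-setup
 * callbacks below are normally driven through the regular ethdev API, and
 * the descriptor counts are ignored by this driver:
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &dev_conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL, mp);
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 */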
static int
virtual_ethdev_rx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t rx_queue_id, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	struct virtual_ethdev_queue *rx_q;

	rx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (rx_q == NULL)
		return -1;

	rx_q->port_id = dev->data->port_id;
	rx_q->queue_id = rx_queue_id;

	dev->data->rx_queues[rx_queue_id] = rx_q;

	return 0;
}

static int
virtual_ethdev_rx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t rx_queue_id __rte_unused, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_tx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t tx_queue_id, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct virtual_ethdev_queue *tx_q;

	tx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (tx_q == NULL)
		return -1;

	tx_q->port_id = dev->data->port_id;
	tx_q->queue_id = tx_queue_id;

	dev->data->tx_queues[tx_queue_id] = tx_q;

	return 0;
}

static int
virtual_ethdev_tx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t tx_queue_id __rte_unused, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_rx_queue_release(void *q __rte_unused)
{
}

static void
virtual_ethdev_tx_queue_release(void *q __rte_unused)
{
}

static int
virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete __rte_unused)
{
	if (!bonded_eth_dev->data->dev_started)
		bonded_eth_dev->data->dev_link.link_status = 0;

	return 0;
}

static int
virtual_ethdev_link_update_fail(struct rte_eth_dev *bonded_eth_dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;

	if (stats)
		rte_memcpy(stats, &dev_private->eth_stats, sizeof(*stats));
}

static void
virtual_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct rte_mbuf *pkt = NULL;

	/* Free any mbufs still held on the tx ring before zeroing the stats */
	while (rte_ring_dequeue(dev_private->tx_queue, (void **)&pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	/* Reset internal statistics */
	memset(&dev_private->eth_stats, 0, sizeof(dev_private->eth_stats));
}

static void
virtual_ethdev_promiscuous_mode_enable(struct rte_eth_dev *dev __rte_unused)
{}

static void
virtual_ethdev_promiscuous_mode_disable(struct rte_eth_dev *dev __rte_unused)
{}
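/*
 * The ops table below wires in the "success" variants by default. Tests
 * flip individual callbacks to their *_fail counterparts at runtime with
 * the virtual_ethdev_*_fn_set_success() helpers that follow; illustrative
 * only, with "port_id" being a port created by virtual_ethdev_create():
 *
 *	virtual_ethdev_configure_fn_set_success(port_id, 0);
 *	// rte_eth_dev_configure() on this port now returns -1
 */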
static struct eth_dev_ops virtual_ethdev_default_dev_ops = {
	.dev_configure = virtual_ethdev_configure_success,
	.dev_start = virtual_ethdev_start_success,
	.dev_stop = virtual_ethdev_stop,
	.dev_close = virtual_ethdev_close,
	.dev_infos_get = virtual_ethdev_info_get,
	.rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
	.tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
	.rx_queue_release = virtual_ethdev_rx_queue_release,
	.tx_queue_release = virtual_ethdev_tx_queue_release,
	.link_update = virtual_ethdev_link_update_success,
	.stats_get = virtual_ethdev_stats_get,
	.stats_reset = virtual_ethdev_stats_reset,
	.promiscuous_enable = virtual_ethdev_promiscuous_mode_enable,
	.promiscuous_disable = virtual_ethdev_promiscuous_mode_disable
};

void
virtual_ethdev_start_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->dev_ops->dev_start = virtual_ethdev_start_success;
	else
		vrtl_eth_dev->dev_ops->dev_start = virtual_ethdev_start_fail;
}

void
virtual_ethdev_configure_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->dev_ops->dev_configure = virtual_ethdev_configure_success;
	else
		vrtl_eth_dev->dev_ops->dev_configure = virtual_ethdev_configure_fail;
}

void
virtual_ethdev_rx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->dev_ops->rx_queue_setup =
				virtual_ethdev_rx_queue_setup_success;
	else
		vrtl_eth_dev->dev_ops->rx_queue_setup =
				virtual_ethdev_rx_queue_setup_fail;
}

void
virtual_ethdev_tx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->dev_ops->tx_queue_setup =
				virtual_ethdev_tx_queue_setup_success;
	else
		vrtl_eth_dev->dev_ops->tx_queue_setup =
				virtual_ethdev_tx_queue_setup_fail;
}

void
virtual_ethdev_link_update_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->dev_ops->link_update = virtual_ethdev_link_update_success;
	else
		vrtl_eth_dev->dev_ops->link_update = virtual_ethdev_link_update_fail;
}

static uint16_t
virtual_ethdev_rx_burst_success(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_queue *pq_map;
	struct virtual_ethdev_private *dev_private;

	int rx_count, i;

	pq_map = (struct virtual_ethdev_queue *)queue;
	vrtl_eth_dev = &rte_eth_devices[pq_map->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	rx_count = rte_ring_dequeue_burst(dev_private->rx_queue, (void **)bufs,
			nb_pkts);

	/* increment ipackets count */
	dev_private->eth_stats.ipackets += rx_count;

	/* increment ibytes count */
	for (i = 0; i < rx_count; i++)
		dev_private->eth_stats.ibytes += rte_pktmbuf_pkt_len(bufs[i]);

	return rx_count;
}

static uint16_t
virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
		struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}
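/*
 * Tx paths: the "success" burst function moves packets onto the internal
 * tx ring; the "fail" variant reports the last tx_burst_fail_count packets
 * of each burst as unsent (their ownership stays with the caller) and
 * frees the ones it claims to have sent, so they never reach the tx ring.
 * Illustrative use, with placeholder names and counts chosen for the
 * example; note the fail count must be set after selecting the fail
 * function, which resets it to zero:
 *
 *	virtual_ethdev_tx_burst_fn_set_success(port_id, 0);
 *	virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(port_id, 2);
 *	sent = rte_eth_tx_burst(port_id, 0, pkts, 8);
 *	// sent == 6; pkts[6] and pkts[7] must still be freed by the caller
 */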
static uint16_t
virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct virtual_ethdev_queue *tx_q = (struct virtual_ethdev_queue *)queue;

	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_private *dev_private;

	int i;

	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	/* Nothing can be transmitted while the link is down */
	if (!vrtl_eth_dev->data->dev_link.link_status)
		nb_pkts = 0;
	else
		nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue, (void **)bufs,
				nb_pkts);

	/* increment opackets count */
	dev_private->eth_stats.opackets += nb_pkts;

	/* increment obytes count */
	for (i = 0; i < nb_pkts; i++)
		dev_private->eth_stats.obytes += rte_pktmbuf_pkt_len(bufs[i]);

	return nb_pkts;
}

static uint16_t
virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev = NULL;
	struct virtual_ethdev_queue *tx_q = NULL;
	struct virtual_ethdev_private *dev_private = NULL;

	int i;

	tx_q = (struct virtual_ethdev_queue *)queue;
	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	if (dev_private->tx_burst_fail_count < nb_pkts) {
		int successfully_txd = nb_pkts - dev_private->tx_burst_fail_count;

		/* increment opackets count */
		dev_private->eth_stats.opackets += successfully_txd;

		/* free the "transmitted" packets in the burst */
		for (i = 0; i < successfully_txd; i++) {
			if (bufs[i] != NULL)
				rte_pktmbuf_free(bufs[i]);

			bufs[i] = NULL;
		}

		return successfully_txd;
	}

	return 0;
}

void
virtual_ethdev_rx_burst_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	else
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_fail;
}

void
virtual_ethdev_tx_burst_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;

	if (success)
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
	else
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_fail;

	dev_private->tx_burst_fail_count = 0;
}

void
virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint8_t port_id,
		uint8_t packet_fail_count)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	dev_private->tx_burst_fail_count = packet_fail_count;
}

void
virtual_ethdev_set_link_status(uint8_t port_id, uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;
}

void
virtual_ethdev_simulate_link_status_interrupt(uint8_t port_id,
		uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;

	_rte_eth_dev_callback_process(vrtl_eth_dev, RTE_ETH_EVENT_INTR_LSC);
}
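/*
 * Illustrative only: a test that wants to observe the simulated LSC event
 * registers a callback through the normal ethdev API first ("test_lsc_cb"
 * is a placeholder supplied by the test):
 *
 *	static void
 *	test_lsc_cb(uint8_t port_id, enum rte_eth_event_type type,
 *			void *param)
 *	{
 *		// record that the event fired
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *			test_lsc_cb, NULL);
 *	virtual_ethdev_simulate_link_status_interrupt(port_id, 0);
 */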
int
virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private =
			vrtl_eth_dev->data->dev_private;

	return rte_ring_enqueue_burst(dev_private->rx_queue, (void **)pkt_burst,
			burst_length);
}

int
virtual_ethdev_get_mbufs_from_tx_queue(uint8_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct virtual_ethdev_private *dev_private;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst,
			burst_length);
}

static uint8_t
get_number_of_sockets(void)
{
	int sockets = 0;
	int i;
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();

	for (i = 0; i < RTE_MAX_MEMSEG && ms[i].addr != NULL; i++) {
		if (sockets < ms[i].socket_id)
			sockets = ms[i].socket_id;
	}
	/* Number of sockets = maximum socket_id + 1 */
	return ++sockets;
}

int
virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
		uint8_t socket_id, uint8_t isr_support)
{
	struct rte_pci_device *pci_dev = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct eth_driver *eth_drv = NULL;
	struct rte_pci_driver *pci_drv = NULL;
	struct eth_dev_ops *dev_ops = NULL;
	struct rte_pci_id *id_table = NULL;
	struct virtual_ethdev_private *dev_private = NULL;
	char name_buf[RTE_RING_NAMESIZE];

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (dev_private) data
	 */

	if (socket_id >= get_number_of_sockets())
		goto err;

	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
	if (pci_dev == NULL)
		goto err;

	eth_drv = rte_zmalloc_socket(name, sizeof(*eth_drv), 0, socket_id);
	if (eth_drv == NULL)
		goto err;

	pci_drv = rte_zmalloc_socket(name, sizeof(*pci_drv), 0, socket_id);
	if (pci_drv == NULL)
		goto err;

	dev_ops = rte_zmalloc_socket(name, sizeof(*dev_ops), 0, socket_id);
	if (dev_ops == NULL)
		goto err;

	id_table = rte_zmalloc_socket(name, sizeof(*id_table), 0, socket_id);
	if (id_table == NULL)
		goto err;

	dev_private = rte_zmalloc_socket(name, sizeof(*dev_private), 0, socket_id);
	if (dev_private == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_rxQ", name);
	dev_private->rx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id,
			0);
	if (dev_private->rx_queue == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_txQ", name);
	dev_private->tx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id,
			0);
	if (dev_private->tx_queue == NULL)
		goto err;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_PCI);
	if (eth_dev == NULL)
		goto err;

	pci_dev->numa_node = socket_id;
	pci_drv->name = virtual_ethdev_driver_name;
	pci_drv->id_table = id_table;

	if (isr_support)
		pci_drv->drv_flags |= RTE_PCI_DRV_INTR_LSC;
	else
		pci_drv->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;

	eth_drv->pci_drv = *pci_drv;
	eth_dev->driver = eth_drv;

	eth_dev->data->nb_rx_queues = (uint16_t)1;
	eth_dev->data->nb_tx_queues = (uint16_t)1;

	TAILQ_INIT(&(eth_dev->link_intr_cbs));

	eth_dev->data->dev_link.link_status = 0;
	eth_dev->data->dev_link.link_speed = ETH_LINK_SPEED_10000;
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

	eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL)
		goto err;

	memcpy(eth_dev->data->mac_addrs, mac_addr,
			sizeof(*eth_dev->data->mac_addrs));

	eth_dev->data->dev_started = 0;
	eth_dev->data->promiscuous = 0;
	eth_dev->data->scattered_rx = 0;
	eth_dev->data->all_multicast = 0;

	eth_dev->data->dev_private = dev_private;

	eth_dev->dev_ops = dev_ops;

	/* Copy default device operation functions */
	memcpy(eth_dev->dev_ops, &virtual_ethdev_default_dev_ops,
			sizeof(*eth_dev->dev_ops));

	eth_dev->pci_dev = pci_dev;
	eth_dev->pci_dev->driver = &eth_drv->pci_drv;

	eth_dev->pci_dev->driver->id_table->device_id = 0xBEEF;

	eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;

	return eth_dev->data->port_id;

err:
	/* rte_free() is a no-op on NULL pointers */
	rte_free(pci_dev);
	rte_free(pci_drv);
	rte_free(eth_drv);
	rte_free(dev_ops);
	rte_free(id_table);
	rte_free(dev_private);

	return -1;
}
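/*
 * Illustrative end-to-end flow (placeholder names, allocation and error
 * handling omitted): create a port, bring it up, inject packets on the
 * emulated wire, receive them through the ethdev API, then check what was
 * "transmitted".
 *
 *	struct ether_addr mac = { .addr_bytes = { 0x02, 0, 0, 0, 0, 1 } };
 *	struct rte_mbuf *in[4], *out[4], *txd[4];
 *	int port_id;
 *	uint16_t n;
 *
 *	port_id = virtual_ethdev_create("virt0", &mac, rte_socket_id(), 0);
 *	// ...configure, set up one rx/tx queue pair, start the port...
 *	virtual_ethdev_set_link_status(port_id, 1);
 *
 *	// ...allocate in[] from a mempool...
 *	virtual_ethdev_add_mbufs_to_rx_queue(port_id, in, 4);
 *	n = rte_eth_rx_burst(port_id, 0, out, 4);	// n == 4
 *	n = rte_eth_tx_burst(port_id, 0, out, n);	// onto the tx ring
 *	virtual_ethdev_get_mbufs_from_tx_queue(port_id, txd, n);
 */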