/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_ring.h>

#include "virtual_pmd.h"

#define MAX_PKT_BURST 512

static const char *virtual_ethdev_driver_name = "Virtual PMD";

struct virtual_ethdev_private {
	struct eth_dev_ops dev_ops;
	struct rte_eth_stats eth_stats;

	struct rte_ring *rx_queue;
	struct rte_ring *tx_queue;

	int tx_burst_fail_count;
};

struct virtual_ethdev_queue {
	int port_id;
	int queue_id;
};

static int
virtual_ethdev_start_success(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 1;

	return 0;
}

static int
virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 0;

	return -1;
}

static void
virtual_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	void *pkt = NULL;
	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_started = 0;

	/* drain and free any mbufs still held in the virtual rings */
	while (rte_ring_dequeue(prv->rx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	while (rte_ring_dequeue(prv->tx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);
}

static int
virtual_ethdev_close(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_configure_success(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_configure_fail(struct rte_eth_dev *dev __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
		struct rte_eth_dev_info *dev_info)
{
	dev_info->driver_name = virtual_ethdev_driver_name;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;

	return 0;
}

static int
virtual_ethdev_rx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t rx_queue_id, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	struct virtual_ethdev_queue *rx_q;

	rx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (rx_q == NULL)
		return -1;

	rx_q->port_id = dev->data->port_id;
	rx_q->queue_id = rx_queue_id;

	dev->data->rx_queues[rx_queue_id] = rx_q;

	return 0;
}

static int
virtual_ethdev_rx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t rx_queue_id __rte_unused, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_tx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t tx_queue_id, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct virtual_ethdev_queue *tx_q;

	tx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (tx_q == NULL)
		return -1;

	tx_q->port_id = dev->data->port_id;
	tx_q->queue_id = tx_queue_id;

	dev->data->tx_queues[tx_queue_id] = tx_q;

	return 0;
}

static int
virtual_ethdev_tx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t tx_queue_id __rte_unused, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_rx_queue_release(void *q __rte_unused)
{
}

static void
virtual_ethdev_tx_queue_release(void *q __rte_unused)
{
}

static int
virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete __rte_unused)
{
	if (!bonded_eth_dev->data->dev_started)
		bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	return 0;
}

static int
virtual_ethdev_link_update_fail(struct rte_eth_dev *bonded_eth_dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;

	if (stats)
		rte_memcpy(stats, &dev_private->eth_stats, sizeof(*stats));

	return 0;
}

static int
virtual_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	void *pkt = NULL;

	/* drain the TX ring, freeing any mbufs still queued on it */
	while (rte_ring_dequeue(dev_private->tx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	/* Reset internal statistics */
	memset(&dev_private->eth_stats, 0, sizeof(dev_private->eth_stats));

	return 0;
}

static int
virtual_ethdev_promiscuous_mode_enable(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_promiscuous_mode_disable(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		__rte_unused struct rte_ether_addr *addr)
{
	return 0;
}
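
/*
 * Default device operations. Every callback points at its "success" variant;
 * the virtual_ethdev_*_fn_set_success() helpers further down swap individual
 * entries for their "fail" counterparts so tests can exercise error paths.
 */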
static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {
	.dev_configure = virtual_ethdev_configure_success,
	.dev_start = virtual_ethdev_start_success,
	.dev_stop = virtual_ethdev_stop,
	.dev_close = virtual_ethdev_close,
	.dev_infos_get = virtual_ethdev_info_get,
	.rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
	.tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
	.rx_queue_release = virtual_ethdev_rx_queue_release,
	.tx_queue_release = virtual_ethdev_tx_queue_release,
	.link_update = virtual_ethdev_link_update_success,
	.mac_addr_set = virtual_ethdev_mac_address_set,
	.stats_get = virtual_ethdev_stats_get,
	.stats_reset = virtual_ethdev_stats_reset,
	.promiscuous_enable = virtual_ethdev_promiscuous_mode_enable,
	.promiscuous_disable = virtual_ethdev_promiscuous_mode_disable
};

void
virtual_ethdev_start_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->dev_start = virtual_ethdev_start_success;
	else
		dev_ops->dev_start = virtual_ethdev_start_fail;
}

void
virtual_ethdev_configure_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->dev_configure = virtual_ethdev_configure_success;
	else
		dev_ops->dev_configure = virtual_ethdev_configure_fail;
}

void
virtual_ethdev_rx_queue_setup_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_success;
	else
		dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_fail;
}

void
virtual_ethdev_tx_queue_setup_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_success;
	else
		dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_fail;
}

void
virtual_ethdev_link_update_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->link_update = virtual_ethdev_link_update_success;
	else
		dev_ops->link_update = virtual_ethdev_link_update_fail;
}
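
/*
 * Burst RX/TX paths. Instead of touching hardware, packets are exchanged
 * through the per-device rx_queue/tx_queue rte_rings: tests stock the RX ring
 * with virtual_ethdev_add_mbufs_to_rx_queue() and drain transmitted packets
 * with virtual_ethdev_get_mbufs_from_tx_queue(), both defined later in this
 * file.
 */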
static uint16_t
virtual_ethdev_rx_burst_success(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_queue *pq_map;
	struct virtual_ethdev_private *dev_private;

	int rx_count, i;

	pq_map = (struct virtual_ethdev_queue *)queue;
	vrtl_eth_dev = &rte_eth_devices[pq_map->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	rx_count = rte_ring_dequeue_burst(dev_private->rx_queue, (void **)bufs,
			nb_pkts, NULL);

	/* increment ipackets count */
	dev_private->eth_stats.ipackets += rx_count;

	/* increment ibytes count */
	for (i = 0; i < rx_count; i++)
		dev_private->eth_stats.ibytes += rte_pktmbuf_pkt_len(bufs[i]);

	return rx_count;
}

static uint16_t
virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
		struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

static uint16_t
virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct virtual_ethdev_queue *tx_q = queue;

	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_private *dev_private;

	int i;

	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	if (!vrtl_eth_dev->data->dev_link.link_status)
		nb_pkts = 0;
	else
		nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue,
				(void **)bufs, nb_pkts, NULL);

	/* increment opackets count */
	dev_private->eth_stats.opackets += nb_pkts;

	/* increment obytes count */
	for (i = 0; i < nb_pkts; i++)
		dev_private->eth_stats.obytes += rte_pktmbuf_pkt_len(bufs[i]);

	return nb_pkts;
}

static uint16_t
virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev = NULL;
	struct virtual_ethdev_queue *tx_q = NULL;
	struct virtual_ethdev_private *dev_private = NULL;

	int i;

	tx_q = queue;
	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	if (dev_private->tx_burst_fail_count < nb_pkts) {
		int successfully_txd = nb_pkts - dev_private->tx_burst_fail_count;

		/* increment opackets count */
		dev_private->eth_stats.opackets += successfully_txd;

		/* free the packets counted as transmitted */
		for (i = 0; i < successfully_txd; i++) {
			if (bufs[i] != NULL)
				rte_pktmbuf_free(bufs[i]);

			bufs[i] = NULL;
		}

		return successfully_txd;
	}

	return 0;
}
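
/*
 * Illustrative sketch (test-side code, not part of this driver): the TX
 * helpers below can be combined to emulate a port that fails to send the
 * last N packets of every burst. The port_id and bufs variables are assumed
 * to be provided by the test:
 *
 *	virtual_ethdev_tx_burst_fn_set_success(port_id, 0);
 *	virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(port_id, 2);
 *	sent = rte_eth_tx_burst(port_id, 0, bufs, 8);	// expect sent == 6
 */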
void
virtual_ethdev_rx_burst_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	else
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_fail;
}

void
virtual_ethdev_tx_burst_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;

	if (success)
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
	else
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_fail;

	dev_private->tx_burst_fail_count = 0;
}

void
virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint16_t port_id,
		uint8_t packet_fail_count)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	dev_private->tx_burst_fail_count = packet_fail_count;
}

void
virtual_ethdev_set_link_status(uint16_t port_id, uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;
}

void
virtual_ethdev_simulate_link_status_interrupt(uint16_t port_id,
		uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;

	rte_eth_dev_callback_process(vrtl_eth_dev, RTE_ETH_EVENT_INTR_LSC,
			NULL);
}

int
virtual_ethdev_add_mbufs_to_rx_queue(uint16_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private =
			vrtl_eth_dev->data->dev_private;

	return rte_ring_enqueue_burst(dev_private->rx_queue, (void **)pkt_burst,
			burst_length, NULL);
}

int
virtual_ethdev_get_mbufs_from_tx_queue(uint16_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct virtual_ethdev_private *dev_private;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst,
			burst_length, NULL);
}
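
/*
 * Create a virtual ethdev backed by a dummy PCI device and driver. The port
 * is registered with one RX and one TX queue, its link initially down and
 * the default "success" callbacks installed; the caller is expected to
 * configure and start it through the normal rte_ethdev API. Returns the new
 * port id on success or -1 if any allocation fails.
 */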
int
virtual_ethdev_create(const char *name, struct rte_ether_addr *mac_addr,
		uint8_t socket_id, uint8_t isr_support)
{
	struct rte_pci_device *pci_dev = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct rte_pci_driver *pci_drv = NULL;
	struct rte_pci_id *id_table = NULL;
	struct virtual_ethdev_private *dev_private = NULL;
	char name_buf[RTE_RING_NAMESIZE];

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (dev_private) data
	 */

	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
	if (pci_dev == NULL)
		goto err;

	pci_drv = rte_zmalloc_socket(name, sizeof(*pci_drv), 0, socket_id);
	if (pci_drv == NULL)
		goto err;

	id_table = rte_zmalloc_socket(name, sizeof(*id_table), 0, socket_id);
	if (id_table == NULL)
		goto err;
	id_table->device_id = 0xBEEF;

	dev_private = rte_zmalloc_socket(name, sizeof(*dev_private), 0, socket_id);
	if (dev_private == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_rxQ", name);
	dev_private->rx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id,
			0);
	if (dev_private->rx_queue == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_txQ", name);
	dev_private->tx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id,
			0);
	if (dev_private->tx_queue == NULL)
		goto err;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto err;

	pci_dev->device.numa_node = socket_id;
	pci_dev->device.name = eth_dev->data->name;
	pci_drv->driver.name = virtual_ethdev_driver_name;
	pci_drv->id_table = id_table;

	if (isr_support)
		pci_drv->drv_flags |= RTE_PCI_DRV_INTR_LSC;
	else
		pci_drv->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;

	pci_dev->device.driver = &pci_drv->driver;
	eth_dev->device = &pci_dev->device;

	eth_dev->data->nb_rx_queues = (uint16_t)1;
	eth_dev->data->nb_tx_queues = (uint16_t)1;

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

	eth_dev->data->mac_addrs = rte_zmalloc(name, RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL)
		goto err;

	memcpy(eth_dev->data->mac_addrs, mac_addr,
			sizeof(*eth_dev->data->mac_addrs));

	eth_dev->data->dev_started = 0;
	eth_dev->data->promiscuous = 0;
	eth_dev->data->scattered_rx = 0;
	eth_dev->data->all_multicast = 0;

	eth_dev->data->dev_private = dev_private;

	/* Copy default device operation functions */
	dev_private->dev_ops = virtual_ethdev_default_dev_ops;
	eth_dev->dev_ops = &dev_private->dev_ops;

	eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;

	rte_eth_dev_probing_finish(eth_dev);

	return eth_dev->data->port_id;

err:
	rte_free(pci_dev);
	rte_free(pci_drv);
	rte_free(id_table);
	rte_free(dev_private);

	return -1;
}
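
/*
 * Illustrative usage sketch (test-side code, not part of this file). The
 * eth_conf, mbuf_pool, mac and packet arrays are assumed to be set up by
 * the test itself:
 *
 *	int port = virtual_ethdev_create("net_virt0", &mac, rte_socket_id(), 1);
 *
 *	rte_eth_dev_configure(port, 1, 1, &eth_conf);
 *	rte_eth_rx_queue_setup(port, 0, 128, rte_socket_id(), NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(port, 0, 128, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port);
 *	virtual_ethdev_simulate_link_status_interrupt(port, 1);
 *
 *	virtual_ethdev_add_mbufs_to_rx_queue(port, tx_pkts, 4);
 *	nb_rx = rte_eth_rx_burst(port, 0, rx_pkts, 4);	// expect nb_rx == 4
 */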