/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include "rte_eth_ring.h"
#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>

#define ETH_RING_NUMA_NODE_ACTION_ARG	"nodeaction"
#define ETH_RING_ACTION_CREATE		"CREATE"
#define ETH_RING_ACTION_ATTACH		"ATTACH"
#define ETH_RING_INTERNAL_ARG		"internal"
#define ETH_RING_INTERNAL_ARG_MAX_LEN	19 /* "0x..16chars..\0" */

static const char *valid_arguments[] = {
	ETH_RING_NUMA_NODE_ACTION_ARG,
	ETH_RING_INTERNAL_ARG,
	NULL
};

struct ring_internal_args {
	struct rte_ring * const *rx_queues;
	const unsigned int nb_rx_queues;
	struct rte_ring * const *tx_queues;
	const unsigned int nb_tx_queues;
	const unsigned int numa_node;
	void *addr; /* self addr for sanity check */
};

enum dev_action {
	DEV_CREATE,
	DEV_ATTACH
};

struct ring_queue {
	struct rte_ring *rng;
	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
};

struct pmd_internals {
	unsigned int max_rx_queues;
	unsigned int max_tx_queues;

	struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
	struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];

	struct rte_ether_addr address;
	enum dev_action action;
};

static struct rte_eth_link pmd_link = {
	.link_speed = RTE_ETH_SPEED_NUM_10G,
	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
	.link_status = RTE_ETH_LINK_DOWN,
	.link_autoneg = RTE_ETH_LINK_FIXED,
};

RTE_LOG_REGISTER_DEFAULT(eth_ring_logtype, NOTICE);

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_ring_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

static uint16_t
eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
			ptrs, nb_bufs, NULL);
	/* single-consumer ring: no other thread updates this counter,
	 * so a plain (non-atomic) add is sufficient
	 */
	if (r->rng->flags & RING_F_SC_DEQ)
		r->rx_pkts.cnt += nb_rx;
	else
		rte_atomic64_add(&(r->rx_pkts), nb_rx);
	return nb_rx;
}

static uint16_t
eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
			ptrs, nb_bufs, NULL);
	/* same reasoning: a single-producer ring needs no atomics */
	if (r->rng->flags & RING_F_SP_ENQ)
		r->tx_pkts.cnt += nb_tx;
	else
		rte_atomic64_add(&(r->tx_pkts), nb_tx);
	return nb_tx;
}
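/*
 * Because each queue above is nothing but an rte_ring of mbuf pointers,
 * a test harness can inject packets by enqueuing mbufs directly on the
 * backing ring and drain them through the normal ethdev burst API.
 * A minimal sketch ("ring", "mb" and "port_id" are the caller's names,
 * not part of this file):
 *
 *	rte_ring_enqueue(ring, (void *)mb);
 *
 *	struct rte_mbuf *rx_pkts[1];
 *	uint16_t nb = rte_eth_rx_burst(port_id, 0, rx_pkts, 1);
 *	// nb == 1 and rx_pkts[0] == mb
 */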
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
	return 0;
}

static int
eth_dev_stop(struct rte_eth_dev *dev)
{
	dev->data->dev_started = 0;
	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
	return 0;
}

static int
eth_dev_set_link_down(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
	return 0;
}

static int
eth_dev_set_link_up(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
	return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
	return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
	dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
	dev_info->min_rx_bufsize = 0;

	return 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
		rx_total += stats->q_ipackets[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
		tx_total += stats->q_opackets[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;

	return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		internal->rx_ring_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		internal->tx_ring_queues[i].tx_pkts.cnt = 0;

	return 0;
}

static void
eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
		uint32_t index __rte_unused)
{
}

static int
eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
		struct rte_ether_addr *mac_addr __rte_unused,
		uint32_t index __rte_unused,
		uint32_t vmdq __rte_unused)
{
	return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

static int
eth_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = NULL;
	struct ring_queue *r = NULL;
	uint16_t i;
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ret = eth_dev_stop(dev);

	internals = dev->data->dev_private;
	if (internals->action == DEV_CREATE) {
		/*
		 * it is only necessary to delete the rings in rx_queues because
		 * they are the same used in tx_queues
		 */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			r = dev->data->rx_queues[i];
			rte_ring_free(r->rng);
		}
	}

	/* mac_addrs must not be freed alone because part of dev_private */
	dev->data->mac_addrs = NULL;

	return ret;
}
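/*
 * The callbacks above are wired into the ethdev framework through the
 * ops table below, so a ring port is driven with the ordinary ethdev
 * calls. A minimal bring-up sketch, assuming the application already
 * holds a valid "port_id" and mempool "mp" (both hypothetical names;
 * the descriptor counts are ignored by this PMD, since queue setup
 * only points at the pre-existing rings):
 *
 *	struct rte_eth_conf port_conf = { 0 };
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 0, rte_socket_id(), NULL, mp);
 *	rte_eth_tx_queue_setup(port_id, 0, 0, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 */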
static const struct eth_dev_ops ops = {
	.dev_close = eth_dev_close,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
};

static int
do_eth_dev_ring_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_ring * const rx_queues[],
		const unsigned int nb_rx_queues,
		struct rte_ring *const tx_queues[],
		const unsigned int nb_tx_queues,
		const unsigned int numa_node, enum dev_action action,
		struct rte_eth_dev **eth_dev_p)
{
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	void **rx_queues_local = NULL;
	void **tx_queues_local = NULL;
	unsigned int i;

	PMD_LOG(INFO, "Creating rings-backed ethdev on numa socket %u",
			numa_node);

	rx_queues_local = rte_calloc_socket(name, nb_rx_queues,
			sizeof(void *), 0, numa_node);
	if (rx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	tx_queues_local = rte_calloc_socket(name, nb_tx_queues,
			sizeof(void *), 0, numa_node);
	if (tx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		rte_errno = ENOSPC;
		goto error;
	}

	/* now put it all together
	 * - store EAL device in eth_dev,
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev_data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */

	eth_dev->device = &vdev->device;

	data = eth_dev->data;
	data->rx_queues = rx_queues_local;
	data->tx_queues = tx_queues_local;

	internals->action = action;
	internals->max_rx_queues = nb_rx_queues;
	internals->max_tx_queues = nb_tx_queues;
	for (i = 0; i < nb_rx_queues; i++) {
		internals->rx_ring_queues[i].rng = rx_queues[i];
		data->rx_queues[i] = &internals->rx_ring_queues[i];
	}
	for (i = 0; i < nb_tx_queues; i++) {
		internals->tx_ring_queues[i].rng = tx_queues[i];
		data->tx_queues[i] = &internals->tx_ring_queues[i];
	}

	data->dev_private = internals;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->address;
	data->promiscuous = 1;
	data->all_multicast = 1;
	data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	eth_dev->dev_ops = &ops;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_ring_rx;
	eth_dev->tx_pkt_burst = eth_ring_tx;

	rte_eth_dev_probing_finish(eth_dev);
	*eth_dev_p = eth_dev;

	return data->port_id;

error:
	rte_free(rx_queues_local);
	rte_free(tx_queues_local);
	rte_free(internals);

	return -1;
}
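/*
 * rte_eth_from_rings() below is the public entry point for wrapping
 * existing rings into an ethdev. A minimal sketch with hypothetical
 * ring and port names (error handling elided for brevity):
 *
 *	struct rte_ring *r = rte_ring_create("exchange_ring", 1024,
 *			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
 *	int port_id = rte_eth_from_rings("ring_port", &r, 1, &r, 1,
 *			rte_socket_id());
 *	if (port_id < 0)
 *		rte_exit(EXIT_FAILURE, "cannot create ring ethdev\n");
 *
 * Using the same ring for rx and tx, as here, yields a loopback port.
 */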
int
rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
		const unsigned int nb_rx_queues,
		struct rte_ring *const tx_queues[],
		const unsigned int nb_tx_queues,
		const unsigned int numa_node)
{
	struct ring_internal_args args = {
		.rx_queues = rx_queues,
		.nb_rx_queues = nb_rx_queues,
		.tx_queues = tx_queues,
		.nb_tx_queues = nb_tx_queues,
		.numa_node = numa_node,
		.addr = &args,
	};
	char args_str[32];
	char ring_name[RTE_RING_NAMESIZE];
	uint16_t port_id = RTE_MAX_ETHPORTS;
	int ret;

	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0) {
		rte_errno = EINVAL;
		return -1;
	}
	if (tx_queues == NULL && nb_tx_queues > 0) {
		rte_errno = EINVAL;
		return -1;
	}
	if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
		rte_errno = EINVAL;
		return -1;
	}
	/* the tx queue array in pmd_internals is fixed-size too, so it
	 * needs the same bound check as the rx one
	 */
	if (nb_tx_queues > RTE_PMD_RING_MAX_TX_RINGS) {
		rte_errno = EINVAL;
		return -1;
	}

	snprintf(args_str, sizeof(args_str), "%s=%p",
			ETH_RING_INTERNAL_ARG, &args);

	ret = snprintf(ring_name, sizeof(ring_name), "net_ring_%s", name);
	if (ret >= (int)sizeof(ring_name)) {
		rte_errno = ENAMETOOLONG;
		return -1;
	}

	ret = rte_vdev_init(ring_name, args_str);
	if (ret) {
		rte_errno = EINVAL;
		return -1;
	}

	ret = rte_eth_dev_get_port_by_name(ring_name, &port_id);
	if (ret) {
		rte_errno = ENODEV;
		return -1;
	}

	return port_id;
}

int
rte_eth_from_ring(struct rte_ring *r)
{
	return rte_eth_from_rings(r->name, &r, 1, &r, 1,
			r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY);
}
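/*
 * eth_dev_ring_create() below backs the "nodeaction" vdev argument
 * declared at the bottom of this file. An illustrative EAL option
 * (the name "r0" is an example value):
 *
 *	--vdev=net_ring0,nodeaction=r0:0:CREATE
 *
 * creates a ring-backed port whose rings are named "ETH_RXTX<n>_r0"
 * on NUMA node 0; passing ...=r0:0:ATTACH instead makes another
 * process look the same rings up rather than create them.
 */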
static int
eth_dev_ring_create(const char *name,
		struct rte_vdev_device *vdev,
		const unsigned int numa_node,
		enum dev_action action, struct rte_eth_dev **eth_dev)
{
	/* rx and tx are so-called from point of view of first port.
	 * They are inverted from the point of view of second port
	 */
	struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
	unsigned int i;
	char rng_name[RTE_RING_NAMESIZE];
	unsigned int num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
			RTE_PMD_RING_MAX_TX_RINGS);

	for (i = 0; i < num_rings; i++) {
		int cc;

		cc = snprintf(rng_name, sizeof(rng_name),
				"ETH_RXTX%u_%s", i, name);
		if (cc >= (int)sizeof(rng_name)) {
			rte_errno = ENAMETOOLONG;
			return -1;
		}

		rxtx[i] = (action == DEV_CREATE) ?
				rte_ring_create(rng_name, 1024, numa_node,
						RING_F_SP_ENQ|RING_F_SC_DEQ) :
				rte_ring_lookup(rng_name);
		if (rxtx[i] == NULL)
			return -1;
	}

	if (do_eth_dev_ring_create(name, vdev, rxtx, num_rings, rxtx, num_rings,
			numa_node, action, eth_dev) < 0)
		return -1;

	return 0;
}

struct node_action_pair {
	char name[PATH_MAX];
	unsigned int node;
	enum dev_action action;
};

struct node_action_list {
	unsigned int total;
	unsigned int count;
	struct node_action_pair *list;
};

static int parse_kvlist(const char *key __rte_unused,
			const char *value, void *data)
{
	struct node_action_list *info = data;
	int ret;
	char *name;
	char *action;
	char *node;
	char *end;

	name = strdup(value);

	ret = -EINVAL;

	if (!name) {
		PMD_LOG(WARNING, "command line parameter is empty for ring pmd!");
		goto out;
	}

	node = strchr(name, ':');
	if (!node) {
		PMD_LOG(WARNING, "could not parse node value from %s",
			name);
		goto out;
	}

	*node = '\0';
	node++;

	action = strchr(node, ':');
	if (!action) {
		PMD_LOG(WARNING, "could not parse action value from %s",
			node);
		goto out;
	}

	*action = '\0';
	action++;

	/*
	 * Need to do some sanity checking here
	 */

	if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
		info->list[info->count].action = DEV_ATTACH;
	else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
		info->list[info->count].action = DEV_CREATE;
	else
		goto out;

	errno = 0;
	info->list[info->count].node = strtol(node, &end, 10);

	if ((errno != 0) || (*end != '\0')) {
		PMD_LOG(WARNING,
			"node value %s is unparseable as a number", node);
		goto out;
	}

	strlcpy(info->list[info->count].name, name,
		sizeof(info->list[info->count].name));

	info->count++;

	ret = 0;
out:
	free(name);
	return ret;
}

static int
parse_internal_args(const char *key __rte_unused, const char *value,
		void *data)
{
	struct ring_internal_args **internal_args = data;
	void *args;
	int ret, n;

	/* make sure 'value' is valid pointer length */
	if (strnlen(value, ETH_RING_INTERNAL_ARG_MAX_LEN) >=
			ETH_RING_INTERNAL_ARG_MAX_LEN) {
		PMD_LOG(ERR, "Error parsing internal args, argument is too long");
		return -1;
	}

	ret = sscanf(value, "%p%n", &args, &n);
	if (ret == 0 || (size_t)n != strlen(value)) {
		PMD_LOG(ERR, "Error parsing internal args");

		return -1;
	}

	*internal_args = args;

	if ((*internal_args)->addr != args)
		return -1;

	return 0;
}
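/*
 * Probe-path note: rte_eth_from_rings() smuggles a pointer to its
 * stack-resident ring_internal_args through the kvargs string (as
 * "internal=0x..."), and parse_internal_args() above recovers it.
 * The addr field acts as a canary: it must point back at the struct
 * itself, guarding against a stale or mis-parsed pointer. This only
 * works because rte_vdev_init() completes synchronously, while the
 * caller's stack frame is still live.
 */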
static int
rte_pmd_ring_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;
	struct node_action_list *info = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct ring_internal_args *internal_args;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);

	PMD_LOG(INFO, "Initializing pmd_ring for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;

		eth_dev->rx_pkt_burst = eth_ring_rx;
		eth_dev->tx_pkt_burst = eth_ring_tx;

		rte_eth_dev_probing_finish(eth_dev);

		return 0;
	}

	if (params == NULL || params[0] == '\0') {
		ret = eth_dev_ring_create(name, dev, rte_socket_id(), DEV_CREATE,
				&eth_dev);
		if (ret == -1) {
			PMD_LOG(INFO,
				"Attach to pmd_ring for %s", name);
			ret = eth_dev_ring_create(name, dev, rte_socket_id(),
					DEV_ATTACH, &eth_dev);
		}
	} else {
		kvlist = rte_kvargs_parse(params, valid_arguments);

		if (!kvlist) {
			PMD_LOG(INFO,
				"Ignoring unsupported parameters when creating rings-backed ethernet device");
			ret = eth_dev_ring_create(name, dev, rte_socket_id(),
					DEV_CREATE, &eth_dev);
			if (ret == -1) {
				PMD_LOG(INFO,
					"Attach to pmd_ring for %s",
					name);
				ret = eth_dev_ring_create(name, dev, rte_socket_id(),
						DEV_ATTACH, &eth_dev);
			}

			return ret;
		}

		if (rte_kvargs_count(kvlist, ETH_RING_INTERNAL_ARG) == 1) {
			ret = rte_kvargs_process(kvlist, ETH_RING_INTERNAL_ARG,
						 parse_internal_args,
						 &internal_args);
			if (ret < 0)
				goto out_free;

			ret = do_eth_dev_ring_create(name, dev,
						     internal_args->rx_queues,
						     internal_args->nb_rx_queues,
						     internal_args->tx_queues,
						     internal_args->nb_tx_queues,
						     internal_args->numa_node,
						     DEV_ATTACH,
						     &eth_dev);
			if (ret >= 0)
				ret = 0;
		} else {
			ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
			info = rte_zmalloc("struct node_action_list",
					   sizeof(struct node_action_list) +
					   (sizeof(struct node_action_pair) * ret),
					   0);
			if (!info) {
				ret = -ENOMEM;
				goto out_free;
			}

			info->total = ret;
			info->list = (struct node_action_pair *)(info + 1);

			ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
						 parse_kvlist, info);

			if (ret < 0)
				goto out_free;

			for (info->count = 0; info->count < info->total; info->count++) {
				ret = eth_dev_ring_create(info->list[info->count].name,
						dev,
						info->list[info->count].node,
						info->list[info->count].action,
						&eth_dev);
				if ((ret == -1) &&
				    (info->list[info->count].action == DEV_CREATE)) {
					PMD_LOG(INFO,
						"Attach to pmd_ring for %s",
						name);
					ret = eth_dev_ring_create(name, dev,
							info->list[info->count].node,
							DEV_ATTACH,
							&eth_dev);
				}
			}
		}
	}

out_free:
	rte_kvargs_free(kvlist);
	rte_free(info);
	return ret;
}

static int
rte_pmd_ring_remove(struct rte_vdev_device *dev)
{
	const char *name = rte_vdev_device_name(dev);
	struct rte_eth_dev *eth_dev = NULL;

	PMD_LOG(INFO, "Un-Initializing pmd_ring for %s", name);

	if (name == NULL)
		return -EINVAL;

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);
	return 0;
}

static struct rte_vdev_driver pmd_ring_drv = {
	.probe = rte_pmd_ring_probe,
	.remove = rte_pmd_ring_remove,
};

RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv);
RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
RTE_PMD_REGISTER_PARAM_STRING(net_ring,
	ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");
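/*
 * Quick way to exercise this driver (illustrative command line; any
 * DPDK application that accepts --vdev works the same way):
 *
 *	dpdk-testpmd -l 0-1 --vdev=net_ring0 -- -i
 *
 * With no device arguments, the probe above creates a port whose rings
 * are shared between rx and tx, so packets transmitted on the port
 * reappear on its own receive side.
 */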