/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <stdlib.h>

#include "rte_eth_ring.h"
#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_os_shim.h>
#include <rte_string_fns.h>
#include <bus_vdev_driver.h>
#include <rte_kvargs.h>
#include <rte_errno.h>

#define ETH_RING_NUMA_NODE_ACTION_ARG	"nodeaction"
#define ETH_RING_ACTION_CREATE		"CREATE"
#define ETH_RING_ACTION_ATTACH		"ATTACH"
#define ETH_RING_ACTION_MAX_LEN		8 /* CREATE | ATTACH */
#define ETH_RING_INTERNAL_ARG		"internal"
#define ETH_RING_INTERNAL_ARG_MAX_LEN	19 /* "0x..16chars..\0" */

static const char *valid_arguments[] = {
	ETH_RING_NUMA_NODE_ACTION_ARG,
	ETH_RING_INTERNAL_ARG,
	NULL
};

struct ring_internal_args {
	struct rte_ring * const *rx_queues;
	const unsigned int nb_rx_queues;
	struct rte_ring * const *tx_queues;
	const unsigned int nb_tx_queues;
	const unsigned int numa_node;
	void *addr; /* self addr for sanity check */
};

enum dev_action {
	DEV_CREATE,
	DEV_ATTACH
};

struct ring_queue {
	struct rte_ring *rng;
	uint16_t in_port;
	RTE_ATOMIC(uint64_t) rx_pkts;
	RTE_ATOMIC(uint64_t) tx_pkts;
};

struct pmd_internals {
	unsigned int max_rx_queues;
	unsigned int max_tx_queues;

	struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
	struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];

	struct rte_ether_addr address;
	enum dev_action action;
};

static struct rte_eth_link pmd_link = {
	.link_speed = RTE_ETH_SPEED_NUM_10G,
	.link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
	.link_status = RTE_ETH_LINK_DOWN,
	.link_autoneg = RTE_ETH_LINK_FIXED,
};

RTE_LOG_REGISTER_DEFAULT(eth_ring_logtype, NOTICE);
#define RTE_LOGTYPE_ETH_RING eth_ring_logtype

#define PMD_LOG(level, ...) \
	RTE_LOG_LINE_PREFIX(level, ETH_RING, "%s(): ", __func__, __VA_ARGS__)

static uint16_t
eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	unsigned int i;
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
			ptrs, nb_bufs, NULL);
	for (i = 0; i < nb_rx; i++)
		bufs[i]->port = r->in_port;
	if (r->rng->flags & RING_F_SC_DEQ)
		r->rx_pkts += nb_rx;
	else
		rte_atomic_fetch_add_explicit(&r->rx_pkts, nb_rx, rte_memory_order_relaxed);
	return nb_rx;
}

static uint16_t
eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	void **ptrs = (void *)&bufs[0];
	struct ring_queue *r = q;
	const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
			ptrs, nb_bufs, NULL);
	if (r->rng->flags & RING_F_SP_ENQ)
		r->tx_pkts += nb_tx;
	else
		rte_atomic_fetch_add_explicit(&r->tx_pkts, nb_tx, rte_memory_order_relaxed);
	return nb_tx;
}
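
/*
 * Illustrative note (a sketch, not part of the driver): the burst functions
 * above are thin wrappers over ring enqueue/dequeue, so a port whose RX and
 * TX queue share one ring loops packets back to itself. Assuming such a
 * port "port_id" with "nb" mbufs in "burst":
 *
 *	struct rte_mbuf *burst[32];
 *	uint16_t sent = rte_eth_tx_burst(port_id, 0, burst, nb);
 *	uint16_t got = rte_eth_rx_burst(port_id, 0, burst, 32);
 *
 * The SP/SC branches simply skip the atomic counter update when the ring
 * flags guarantee a single producer or a single consumer.
 */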

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
	return 0;
}

static int
eth_dev_stop(struct rte_eth_dev *dev)
{
	uint16_t i;

	dev->data->dev_started = 0;
	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

static int
eth_dev_set_link_down(struct rte_eth_dev *dev)
{
	uint16_t i;

	dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static int
eth_dev_set_link_up(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
	return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	internals->rx_ring_queues[rx_queue_id].in_port = dev->data->port_id;
	dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
	return 0;
}


static int
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
	dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
	dev_info->min_rx_bufsize = 0;

	return 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	unsigned long rx_total = 0, tx_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts;
		rx_total += stats->q_ipackets[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts;
		tx_total += stats->q_opackets[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;

	return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		internal->rx_ring_queues[i].rx_pkts = 0;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		internal->tx_ring_queues[i].tx_pkts = 0;

	return 0;
}

static void
eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
	uint32_t index __rte_unused)
{
}

static int
eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
	struct rte_ether_addr *mac_addr __rte_unused,
	uint32_t index __rte_unused,
	uint32_t vmdq __rte_unused)
{
	return 0;
}

static int
eth_promiscuous_enable(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_promiscuous_disable(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_allmulticast_enable(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_allmulticast_disable(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

static int
eth_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = NULL;
	struct ring_queue *r = NULL;
	uint16_t i;
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ret = eth_dev_stop(dev);

	internals = dev->data->dev_private;
	if (internals->action == DEV_CREATE) {
		/*
		 * it is only necessary to delete the rings in rx_queues because
		 * they are the same used in tx_queues
		 */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			r = dev->data->rx_queues[i];
			rte_ring_free(r->rng);
		}
	}

	/* mac_addrs must not be freed alone because part of dev_private */
	dev->data->mac_addrs = NULL;

	return ret;
}

static int ring_monitor_callback(const uint64_t value,
		const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ])
{
	/* Check if the head pointer has changed */
	return value != arg[0];
}

static int
eth_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
{
	struct rte_ring *rng = ((struct ring_queue *)rx_queue)->rng;

	/*
	 * Monitor the ring's producer head: if it moves, the peer has
	 * enqueued packets and there is something to receive
	 */
	pmc->addr = &rng->prod.head;
	pmc->size = sizeof(rng->prod.head);
	pmc->opaque[0] = rng->prod.head;
	pmc->fn = ring_monitor_callback;
	return 0;
}
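
/*
 * Illustrative sketch (not part of the driver): an application can use the
 * condition filled in above to pause until the peer enqueues. Assuming a
 * ring port "port_id" with RX queue "queue_id":
 *
 *	struct rte_power_monitor_cond pmc;
 *
 *	if (rte_eth_get_monitor_addr(port_id, queue_id, &pmc) == 0)
 *		rte_power_monitor(&pmc, rte_get_tsc_cycles() +
 *				rte_get_tsc_hz() / 1000);
 *
 * rte_power_monitor() returns once ring_monitor_callback() reports that the
 * producer head moved, or after the ~1 ms TSC deadline expires.
 */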

static const struct eth_dev_ops ops = {
	.dev_close = eth_dev_close,
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
	.promiscuous_enable = eth_promiscuous_enable,
	.promiscuous_disable = eth_promiscuous_disable,
	.allmulticast_enable = eth_allmulticast_enable,
	.allmulticast_disable = eth_allmulticast_disable,
	.get_monitor_addr = eth_get_monitor_addr,
};

static int
do_eth_dev_ring_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_ring * const rx_queues[],
		const unsigned int nb_rx_queues,
		struct rte_ring * const tx_queues[],
		const unsigned int nb_tx_queues,
		const unsigned int numa_node, enum dev_action action,
		struct rte_eth_dev **eth_dev_p)
{
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	void **rx_queues_local = NULL;
	void **tx_queues_local = NULL;
	unsigned int i;

	PMD_LOG(INFO, "Creating rings-backed ethdev on numa socket %u",
			numa_node);

	rx_queues_local = rte_calloc_socket(name, nb_rx_queues,
			sizeof(void *), 0, numa_node);
	if (rx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	tx_queues_local = rte_calloc_socket(name, nb_tx_queues,
			sizeof(void *), 0, numa_node);
	if (tx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		rte_errno = ENOSPC;
		goto error;
	}

	/* now put it all together
	 * - store EAL device in eth_dev,
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev_data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */

	eth_dev->device = &vdev->device;

	data = eth_dev->data;
	data->rx_queues = rx_queues_local;
	data->tx_queues = tx_queues_local;

	internals->action = action;
	internals->max_rx_queues = nb_rx_queues;
	internals->max_tx_queues = nb_tx_queues;
	for (i = 0; i < nb_rx_queues; i++) {
		internals->rx_ring_queues[i].rng = rx_queues[i];
		internals->rx_ring_queues[i].in_port = -1;
		data->rx_queues[i] = &internals->rx_ring_queues[i];
	}
	for (i = 0; i < nb_tx_queues; i++) {
		internals->tx_ring_queues[i].rng = tx_queues[i];
		internals->tx_ring_queues[i].in_port = -1;
		data->tx_queues[i] = &internals->tx_ring_queues[i];
	}

	data->dev_private = internals;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &internals->address;
	data->promiscuous = 1;
	data->all_multicast = 1;
	data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	eth_dev->dev_ops = &ops;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_ring_rx;
	eth_dev->tx_pkt_burst = eth_ring_tx;

	rte_eth_dev_probing_finish(eth_dev);
	*eth_dev_p = eth_dev;

	return data->port_id;

error:
	rte_free(rx_queues_local);
	rte_free(tx_queues_local);
	rte_free(internals);

	return -1;
}

int
rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
		const unsigned int nb_rx_queues,
		struct rte_ring *const tx_queues[],
		const unsigned int nb_tx_queues,
		const unsigned int numa_node)
{
	struct ring_internal_args args = {
		.rx_queues = rx_queues,
		.nb_rx_queues = nb_rx_queues,
		.tx_queues = tx_queues,
		.nb_tx_queues = nb_tx_queues,
		.numa_node = numa_node,
		.addr = &args,
	};
	char args_str[32];
	char ring_name[RTE_RING_NAMESIZE];
	uint16_t port_id = RTE_MAX_ETHPORTS;
	int ret;

	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0) {
		rte_errno = EINVAL;
		return -1;
	}
	if (tx_queues == NULL && nb_tx_queues > 0) {
		rte_errno = EINVAL;
		return -1;
	}
	if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
		rte_errno = EINVAL;
		return -1;
	}

	snprintf(args_str, sizeof(args_str), "%s=%p",
			ETH_RING_INTERNAL_ARG, &args);

	ret = snprintf(ring_name, sizeof(ring_name), "net_ring_%s", name);
	if (ret >= (int)sizeof(ring_name)) {
		rte_errno = ENAMETOOLONG;
		return -1;
	}

	ret = rte_vdev_init(ring_name, args_str);
	if (ret) {
		rte_errno = EINVAL;
		return -1;
	}

	ret = rte_eth_dev_get_port_by_name(ring_name, &port_id);
	if (ret) {
		rte_errno = ENODEV;
		return -1;
	}

	return port_id;
}

int
rte_eth_from_ring(struct rte_ring *r)
{
	return rte_eth_from_rings(r->name, &r, 1, &r, 1,
			r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY);
}
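
/*
 * Usage sketch (illustrative only): back an ethdev with an existing ring.
 * The ring name "exchange" is a made-up example.
 *
 *	struct rte_ring *r = rte_ring_create("exchange", 1024,
 *			rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
 *	int port = rte_eth_from_ring(r);
 *
 * The new port uses "r" as both its single RX and single TX queue; for
 * distinct queues or multiple rings per direction, call
 * rte_eth_from_rings() directly.
 */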

static int
eth_dev_ring_create(const char *name,
		struct rte_vdev_device *vdev,
		const unsigned int numa_node,
		enum dev_action action, struct rte_eth_dev **eth_dev)
{
	/* rx and tx are so-called from point of view of first port.
	 * They are inverted from the point of view of second port
	 */
	struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
	unsigned int i;
	char rng_name[RTE_RING_NAMESIZE];
	unsigned int num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
			RTE_PMD_RING_MAX_TX_RINGS);

	for (i = 0; i < num_rings; i++) {
		int cc;

		cc = snprintf(rng_name, sizeof(rng_name),
				"ETH_RXTX%u_%s", i, name);
		if (cc >= (int)sizeof(rng_name)) {
			rte_errno = ENAMETOOLONG;
			return -1;
		}

		rxtx[i] = (action == DEV_CREATE) ?
				rte_ring_create(rng_name, 1024, numa_node,
						RING_F_SP_ENQ|RING_F_SC_DEQ) :
				rte_ring_lookup(rng_name);
		if (rxtx[i] == NULL)
			return -1;
	}

	if (do_eth_dev_ring_create(name, vdev, rxtx, num_rings, rxtx, num_rings,
			numa_node, action, eth_dev) < 0)
		return -1;

	return 0;
}

struct node_action_pair {
	char name[ETH_RING_ACTION_MAX_LEN];
	unsigned int node;
	enum dev_action action;
};

struct node_action_list {
	unsigned int total;
	unsigned int count;
	struct node_action_pair *list;
};

static int parse_kvlist(const char *key __rte_unused,
		const char *value, void *data)
{
	struct node_action_list *info = data;
	int ret;
	char *name;
	char *action;
	char *node;
	char *end;

	name = strdup(value);

	ret = -EINVAL;

	if (!name) {
		PMD_LOG(WARNING, "command line parameter is empty for ring pmd!");
		goto out;
	}

	node = strchr(name, ':');
	if (!node) {
		PMD_LOG(WARNING, "could not parse node value from %s",
			name);
		goto out;
	}

	*node = '\0';
	node++;

	action = strchr(node, ':');
	if (!action) {
		PMD_LOG(WARNING, "could not parse action value from %s",
			node);
		goto out;
	}

	*action = '\0';
	action++;

	/*
	 * Need to do some sanity checking here
	 */

	if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
		info->list[info->count].action = DEV_ATTACH;
	else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
		info->list[info->count].action = DEV_CREATE;
	else
		goto out;

	errno = 0;
	info->list[info->count].node = strtol(node, &end, 10);

	if ((errno != 0) || (*end != '\0')) {
		PMD_LOG(WARNING,
			"node value %s is unparseable as a number", node);
		goto out;
	}

	strlcpy(info->list[info->count].name, name,
			sizeof(info->list[info->count].name));

	info->count++;

	ret = 0;
out:
	free(name);
	return ret;
}

static int
parse_internal_args(const char *key __rte_unused, const char *value,
		void *data)
{
	struct ring_internal_args **internal_args = data;
	void *args;
	int ret, n;

	/* make sure 'value' is valid pointer length */
	if (strnlen(value, ETH_RING_INTERNAL_ARG_MAX_LEN) >=
			ETH_RING_INTERNAL_ARG_MAX_LEN) {
		PMD_LOG(ERR, "Error parsing internal args, argument is too long");
		return -1;
	}

	ret = sscanf(value, "%p%n", &args, &n);
	if (ret == 0 || (size_t)n != strlen(value)) {
		PMD_LOG(ERR, "Error parsing internal args");

		return -1;
	}

	*internal_args = args;

	if ((*internal_args)->addr != args)
		return -1;

	return 0;
}
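
/*
 * Illustrative note (not part of the driver): rte_eth_from_rings() above
 * smuggles a pointer through the vdev argument string, e.g. a device
 * "net_ring_foo" probed with args "internal=0x7ffc8a2b3c40" (pointer value
 * hypothetical). This works because rte_vdev_init() probes synchronously in
 * the calling thread, so the stack-allocated ring_internal_args is still
 * live when parse_internal_args() decodes it; the addr self-reference
 * verifies the round trip.
 */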

static int
rte_pmd_ring_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;
	struct node_action_list *info = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct ring_internal_args *internal_args;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);

	PMD_LOG(INFO, "Initializing pmd_ring for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;

		eth_dev->rx_pkt_burst = eth_ring_rx;
		eth_dev->tx_pkt_burst = eth_ring_tx;

		rte_eth_dev_probing_finish(eth_dev);

		return 0;
	}

	if (params == NULL || params[0] == '\0') {
		ret = eth_dev_ring_create(name, dev, rte_socket_id(), DEV_CREATE,
				&eth_dev);
		if (ret == -1) {
			PMD_LOG(INFO,
				"Attach to pmd_ring for %s", name);
			ret = eth_dev_ring_create(name, dev, rte_socket_id(),
					DEV_ATTACH, &eth_dev);
		}
	} else {
		kvlist = rte_kvargs_parse(params, valid_arguments);

		if (!kvlist) {
			PMD_LOG(INFO,
				"Ignoring unsupported parameters when creating rings-backed ethernet device");
			ret = eth_dev_ring_create(name, dev, rte_socket_id(),
					DEV_CREATE, &eth_dev);
			if (ret == -1) {
				PMD_LOG(INFO,
					"Attach to pmd_ring for %s",
					name);
				ret = eth_dev_ring_create(name, dev, rte_socket_id(),
						DEV_ATTACH, &eth_dev);
			}

			return ret;
		}

		if (rte_kvargs_count(kvlist, ETH_RING_INTERNAL_ARG) == 1) {
			ret = rte_kvargs_process(kvlist, ETH_RING_INTERNAL_ARG,
						 parse_internal_args,
						 &internal_args);
			if (ret < 0)
				goto out_free;

			ret = do_eth_dev_ring_create(name, dev,
					internal_args->rx_queues,
					internal_args->nb_rx_queues,
					internal_args->tx_queues,
					internal_args->nb_tx_queues,
					internal_args->numa_node,
					DEV_ATTACH,
					&eth_dev);
			if (ret >= 0)
				ret = 0;
		} else {
			ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
			info = rte_zmalloc("struct node_action_list",
					   sizeof(struct node_action_list) +
					   (sizeof(struct node_action_pair) * ret),
					   0);
			if (!info)
				goto out_free;

			info->total = ret;
			info->list = (struct node_action_pair *)(info + 1);

			ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
						 parse_kvlist, info);

			if (ret < 0)
				goto out_free;

			for (info->count = 0; info->count < info->total; info->count++) {
				ret = eth_dev_ring_create(info->list[info->count].name,
							  dev,
							  info->list[info->count].node,
							  info->list[info->count].action,
							  &eth_dev);
				if ((ret == -1) &&
				    (info->list[info->count].action == DEV_CREATE)) {
					PMD_LOG(INFO,
						"Attach to pmd_ring for %s",
						name);
					ret = eth_dev_ring_create(name, dev,
							info->list[info->count].node,
							DEV_ATTACH,
							&eth_dev);
				}
			}
		}
	}

out_free:
	rte_kvargs_free(kvlist);
	rte_free(info);
	return ret;
}

static int
rte_pmd_ring_remove(struct rte_vdev_device *dev)
{
	const char *name = rte_vdev_device_name(dev);
	struct rte_eth_dev *eth_dev = NULL;

	PMD_LOG(INFO, "Un-Initializing pmd_ring for %s", name);

	if (name == NULL)
		return -EINVAL;

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return 0; /* port already released */

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);
	return 0;
}

static struct rte_vdev_driver pmd_ring_drv = {
	.probe = rte_pmd_ring_probe,
	.remove = rte_pmd_ring_remove,
};

RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv);
RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
RTE_PMD_REGISTER_PARAM_STRING(net_ring,
	ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");
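
/*
 * Usage sketch (illustrative, ring name "ring0" is an example): the driver
 * can be instantiated from the EAL command line, either with defaults or
 * with explicit name:node:action tuples:
 *
 *	dpdk-testpmd --vdev=net_ring0
 *	dpdk-testpmd --vdev='net_ring0,nodeaction=ring0:0:CREATE'
 *
 * The second form creates the backing rings on NUMA node 0; another port
 * can then share them by passing nodeaction=ring0:0:ATTACH, which looks the
 * rings up by name instead of creating them.
 */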