/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright 2017-2020 NXP
 *
 */
/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <sys/eventfd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_bus.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mbuf_dyn.h>

#include <dpaa_of.h>
#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <dpaax_iova_table.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <netcfg.h>

static struct rte_dpaa_bus rte_dpaa_bus;
struct netcfg_info *dpaa_netcfg;

/* Variable to hold the portal_key, once created. */
static pthread_key_t dpaa_portal_key;

unsigned int dpaa_svr_family;

#define FSL_DPAA_BUS_NAME dpaa_bus

RTE_DEFINE_PER_LCORE(struct dpaa_portal *, dpaa_io);

#define DPAA_SEQN_DYNFIELD_NAME "dpaa_seqn_dynfield"
int dpaa_seqn_dynfield_offset = -1;

struct fm_eth_port_cfg *
dpaa_get_eth_port_cfg(int dev_id)
{
	return &dpaa_netcfg->port_cfg[dev_id];
}

static int
compare_dpaa_devices(struct rte_dpaa_device *dev1,
		     struct rte_dpaa_device *dev2)
{
	int comp = 0;

	/* Segregating ETH from SEC devices */
	if (dev1->device_type > dev2->device_type)
		comp = 1;
	else if (dev1->device_type < dev2->device_type)
		comp = -1;
	else
		comp = 0;

	if ((comp != 0) || (dev1->device_type != FSL_DPAA_ETH))
		return comp;

	if (dev1->id.fman_id > dev2->id.fman_id) {
		comp = 1;
	} else if (dev1->id.fman_id < dev2->id.fman_id) {
		comp = -1;
	} else {
		/* FMAN ids match, check for mac_id */
		if (dev1->id.mac_id > dev2->id.mac_id)
			comp = 1;
		else if (dev1->id.mac_id < dev2->id.mac_id)
			comp = -1;
		else
			comp = 0;
	}

	return comp;
}

static inline void
dpaa_add_to_device_list(struct rte_dpaa_device *newdev)
{
	int comp, inserted = 0;
	struct rte_dpaa_device *dev = NULL;
	struct rte_dpaa_device *tdev = NULL;

	TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
		comp = compare_dpaa_devices(newdev, dev);
		if (comp < 0) {
			TAILQ_INSERT_BEFORE(dev, newdev, next);
			inserted = 1;
			break;
		}
	}

	if (!inserted)
		TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, newdev, next);
}

/*
 * Looks up the SEC (CAAM) node in the device tree.
 * Returns -1 if no SEC device is available, 0 otherwise.
 */
static inline int
dpaa_sec_available(void)
{
	const struct device_node *caam_node;

	for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
		return 0;
	}

	return -1;
}

static void dpaa_clean_device_list(void);

static struct rte_devargs *
dpaa_devargs_lookup(struct rte_dpaa_device *dev)
{
	struct rte_devargs *devargs;
	char dev_name[32];

	RTE_EAL_DEVARGS_FOREACH("dpaa_bus", devargs) {
		devargs->bus->parse(devargs->name, &dev_name);
		if (strcmp(dev_name, dev->device.name) == 0) {
			DPAA_BUS_INFO("**Devargs matched %s", dev_name);
			return devargs;
		}
	}
	return NULL;
}

static int
dpaa_create_device_list(void)
{
	int i;
	int ret;
	struct rte_dpaa_device *dev;
	struct fm_eth_port_cfg *cfg;
	struct fman_if *fman_intf;

	/* Creating Ethernet Devices */
	for (i = 0; i < dpaa_netcfg->num_ethports; i++) {
		dev = calloc(1, sizeof(struct rte_dpaa_device));
		if (!dev) {
			DPAA_BUS_LOG(ERR, "Failed to allocate ETH devices");
			ret = -ENOMEM;
			goto cleanup;
		}

		dev->device.bus = &rte_dpaa_bus.bus;

		cfg = &dpaa_netcfg->port_cfg[i];
		fman_intf = cfg->fman_if;

		/* Device identifiers */
		dev->id.fman_id = fman_intf->fman_idx + 1;
		dev->id.mac_id = fman_intf->mac_idx;
		dev->device_type = FSL_DPAA_ETH;
		dev->id.dev_id = i;

		/* Create device name */
		memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
		sprintf(dev->name, "fm%d-mac%d", (fman_intf->fman_idx + 1),
			fman_intf->mac_idx);
		DPAA_BUS_LOG(INFO, "%s netdev added", dev->name);
		dev->device.name = dev->name;
		dev->device.devargs = dpaa_devargs_lookup(dev);

		dpaa_add_to_device_list(dev);
	}

	rte_dpaa_bus.device_count = i;

	/* Unlike the ETH case, a fixed number (RTE_LIBRTE_DPAA_MAX_CRYPTODEV)
	 * of SEC devices is created, and only if a SEC node is found in the
	 * device tree. Logically there is no limit on the number of devices
	 * (QI interfaces) that could be created.
	 */

	if (dpaa_sec_available()) {
		DPAA_BUS_LOG(INFO, "DPAA SEC devices are not available");
		return 0;
	}

	/* Creating SEC Devices */
	for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
		dev = calloc(1, sizeof(struct rte_dpaa_device));
		if (!dev) {
			DPAA_BUS_LOG(ERR, "Failed to allocate SEC devices");
			ret = -1;
			goto cleanup;
		}

		dev->device_type = FSL_DPAA_CRYPTO;
		dev->id.dev_id = rte_dpaa_bus.device_count + i;

		/* Even though RTE_CRYPTODEV_NAME_MAX_LEN is the valid length
		 * of a crypto PMD name, RTE_ETH_NAME_MAX_LEN is used here as
		 * that is the size allocated for dev->name.
		 */
		memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
		sprintf(dev->name, "dpaa_sec-%d", i + 1);
		DPAA_BUS_LOG(INFO, "%s cryptodev added", dev->name);
		dev->device.name = dev->name;
		dev->device.devargs = dpaa_devargs_lookup(dev);

		dpaa_add_to_device_list(dev);
	}

	rte_dpaa_bus.device_count += i;

	return 0;

cleanup:
	dpaa_clean_device_list();
	return ret;
}

static void
dpaa_clean_device_list(void)
{
	struct rte_dpaa_device *dev = NULL;
	struct rte_dpaa_device *tdev = NULL;

	TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
		TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
		free(dev);
		dev = NULL;
	}
}

int rte_dpaa_portal_init(void *arg)
{
	static const struct rte_mbuf_dynfield dpaa_seqn_dynfield_desc = {
		.name = DPAA_SEQN_DYNFIELD_NAME,
		.size = sizeof(dpaa_seqn_t),
		.align = __alignof__(dpaa_seqn_t),
	};
	unsigned int cpu, lcore = rte_lcore_id();
	int ret;

	BUS_INIT_FUNC_TRACE();

	if ((size_t)arg == 1 || lcore == LCORE_ID_ANY)
		lcore = rte_get_main_lcore();
	else if (lcore >= RTE_MAX_LCORE)
		return -1;

	cpu = rte_lcore_to_cpu_id(lcore);

	dpaa_seqn_dynfield_offset =
		rte_mbuf_dynfield_register(&dpaa_seqn_dynfield_desc);
	if (dpaa_seqn_dynfield_offset < 0) {
		DPAA_BUS_LOG(ERR,
			"Failed to register mbuf field for dpaa sequence number\n");
		return -rte_errno;
	}

	/* Initialise bman thread portals */
	ret = bman_thread_init();
	if (ret) {
		DPAA_BUS_LOG(ERR, "bman_thread_init failed on core %u"
			" (lcore=%u) with ret: %d", cpu, lcore, ret);
		return ret;
	}

	DPAA_BUS_LOG(DEBUG, "BMAN thread initialized - CPU=%d lcore=%d",
		     cpu, lcore);

	/* Initialise qman thread portals */
	ret = qman_thread_init();
	if (ret) {
		DPAA_BUS_LOG(ERR, "qman_thread_init failed on core %u"
			" (lcore=%u) with ret: %d", cpu, lcore, ret);
		bman_thread_finish();
		return ret;
	}

	DPAA_BUS_LOG(DEBUG, "QMAN thread initialized - CPU=%d lcore=%d",
		     cpu, lcore);

	DPAA_PER_LCORE_PORTAL = rte_malloc(NULL, sizeof(struct dpaa_portal),
					   RTE_CACHE_LINE_SIZE);
	if (!DPAA_PER_LCORE_PORTAL) {
		DPAA_BUS_LOG(ERR, "Unable to allocate memory");
		bman_thread_finish();
		qman_thread_finish();
		return -ENOMEM;
	}

	DPAA_PER_LCORE_PORTAL->qman_idx = qman_get_portal_index();
	DPAA_PER_LCORE_PORTAL->bman_idx = bman_get_portal_index();
	DPAA_PER_LCORE_PORTAL->tid = syscall(SYS_gettid);

	ret = pthread_setspecific(dpaa_portal_key,
				  (void *)DPAA_PER_LCORE_PORTAL);
	if (ret) {
		DPAA_BUS_LOG(ERR, "pthread_setspecific failed on core %u"
			" (lcore=%u) with ret: %d", cpu, lcore, ret);
		dpaa_portal_finish(NULL);

		return ret;
	}

	DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");

	return 0;
}

int
rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
{
	/* Affine the portal created above with the FQ's channel */
	u32 sdqcr;
	int ret;

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init(arg);
		if (ret < 0) {
			DPAA_BUS_LOG(ERR, "portal initialization failure");
			return ret;
		}
	}

	/* Initialise qman specific portals */
	ret = fsl_qman_fq_portal_init(fq->qp);
	if (ret) {
		DPAA_BUS_LOG(ERR, "Unable to init fq portal");
		return -1;
	}

	sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(fq->ch_id);
	qman_static_dequeue_add(sdqcr, fq->qp);

	return 0;
}

int rte_dpaa_portal_fq_close(struct qman_fq *fq)
{
	return fsl_qman_fq_portal_destroy(fq->qp);
}

void
dpaa_portal_finish(void *arg)
{
	struct dpaa_portal *dpaa_io_portal = (struct dpaa_portal *)arg;

	if (!dpaa_io_portal) {
		DPAA_BUS_LOG(DEBUG, "Portal already cleaned");
		return;
	}

	bman_thread_finish();
	qman_thread_finish();

	pthread_setspecific(dpaa_portal_key, NULL);

	rte_free(dpaa_io_portal);
	dpaa_io_portal = NULL;
	DPAA_PER_LCORE_PORTAL = NULL;
}

static int
rte_dpaa_bus_parse(const char *name, void *out)
{
	unsigned int i, j;
	size_t delta;

	/* There are two ways of passing the device name, with and without
	 * the bus separator: "dpaa_bus:fm1-mac3" with separator, and
	 * "fm1-mac3" without. Both need to be handled.
	 * It is also possible that "name=fm1-mac3" is passed along.
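	 * All three forms resolve to the same canonical "fmX-macY" string
	 * that dpaa_create_device_list() assigns as the device name.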
	 */
	DPAA_BUS_DEBUG("Parse device name (%s)", name);

	delta = 0;
	if (strncmp(name, "dpaa_bus:", 9) == 0) {
		delta = 9;
	} else if (strncmp(name, "name=", 5) == 0) {
		delta = 5;
	}

	if (sscanf(&name[delta], "fm%u-mac%u", &i, &j) != 2 ||
	    i >= 2 || j >= 16) {
		return -EINVAL;
	}

	if (out != NULL) {
		char *out_name = out;
		const size_t max_name_len = sizeof("fm.-mac..") - 1;

		/* Do not check for truncation, either name ends with
		 * '\0' or the device name is followed by parameters and there
		 * will be a ',' instead. Not copying past this comma is not an
		 * error.
		 */
		strlcpy(out_name, &name[delta], max_name_len + 1);

		/* Second digit of mac%u could instead be ','. */
		if ((strlen(out_name) == max_name_len) &&
		    out_name[max_name_len] == ',')
			out_name[max_name_len] = '\0';
	}

	return 0;
}

#define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa"
#define DPAA_DEV_PATH2 "/sys/devices/platform/fsl,dpaa"

static int
rte_dpaa_bus_scan(void)
{
	int ret;

	BUS_INIT_FUNC_TRACE();

	if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
	    (access(DPAA_DEV_PATH2, F_OK) != 0)) {
		RTE_LOG(DEBUG, EAL, "DPAA Bus not present. Skipping.\n");
		return 0;
	}

	if (rte_dpaa_bus.detected)
		return 0;

	rte_dpaa_bus.detected = 1;

	/* Create the key, supplying a destructor that is invoked when a
	 * portal-affined thread exits.
	 */
	ret = pthread_key_create(&dpaa_portal_key, dpaa_portal_finish);
	if (ret) {
		DPAA_BUS_LOG(DEBUG, "Unable to create pthread key. (%d)", ret);
		dpaa_clean_device_list();
		return ret;
	}

	return 0;
}

/* Register a DPAA bus based driver */
void
rte_dpaa_driver_register(struct rte_dpaa_driver *driver)
{
	RTE_VERIFY(driver);

	BUS_INIT_FUNC_TRACE();

	TAILQ_INSERT_TAIL(&rte_dpaa_bus.driver_list, driver, next);
	/* Update Bus references */
	driver->dpaa_bus = &rte_dpaa_bus;
}

/* Un-register a DPAA bus based driver */
void
rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver)
{
	struct rte_dpaa_bus *dpaa_bus;

	BUS_INIT_FUNC_TRACE();

	dpaa_bus = driver->dpaa_bus;

	TAILQ_REMOVE(&dpaa_bus->driver_list, driver, next);
	/* Update Bus references */
	driver->dpaa_bus = NULL;
}

static int
rte_dpaa_device_match(struct rte_dpaa_driver *drv,
		      struct rte_dpaa_device *dev)
{
	if (!drv || !dev) {
		DPAA_BUS_DEBUG("Invalid drv or dev received.");
		return -1;
	}

	if (drv->drv_type == dev->device_type)
		return 0;

	return -1;
}

static int
rte_dpaa_bus_dev_build(void)
{
	int ret;

	/* Load the device-tree driver */
	ret = of_init();
	if (ret) {
		DPAA_BUS_LOG(ERR, "of_init failed with ret: %d", ret);
		return -1;
	}

	/* Get the interface configurations from device-tree */
	dpaa_netcfg = netcfg_acquire();
	if (!dpaa_netcfg) {
		DPAA_BUS_LOG(ERR,
			"netcfg failed: /dev/fsl_usdpaa device not available");
		DPAA_BUS_WARN(
			"Check if you are using USDPAA based device tree");
		return -EINVAL;
	}

	RTE_LOG(NOTICE, EAL, "DPAA Bus Detected\n");

	if (!dpaa_netcfg->num_ethports) {
		DPAA_BUS_LOG(INFO, "NO DPDK mapped net interfaces available");
		/* This is not an error */
	}

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
	dump_netcfg(dpaa_netcfg);
#endif
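
	/*
	 * At this point dpaa_netcfg holds one fm_eth_port_cfg entry per
	 * USDPAA-mapped MAC. dpaa_create_device_list() below turns each
	 * entry into an FSL_DPAA_ETH bus device named
	 * "fm<fman_idx+1>-mac<mac_idx>" and, if a SEC node is present,
	 * appends the FSL_DPAA_CRYPTO devices after them.
	 */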

	DPAA_BUS_LOG(DEBUG, "Number of ethernet devices = %d",
		     dpaa_netcfg->num_ethports);
	ret = dpaa_create_device_list();
	if (ret) {
		DPAA_BUS_LOG(ERR, "Unable to create device list. (%d)", ret);
		return ret;
	}
	return 0;
}

static int rte_dpaa_setup_intr(struct rte_intr_handle *intr_handle)
{
	int fd;

	fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
	if (fd < 0) {
		DPAA_BUS_ERR("Cannot set up eventfd, error %i (%s)",
			     errno, strerror(errno));
		return errno;
	}

	intr_handle->fd = fd;
	intr_handle->type = RTE_INTR_HANDLE_EXT;

	return 0;
}

static int
rte_dpaa_bus_probe(void)
{
	int ret = -1;
	struct rte_dpaa_device *dev;
	struct rte_dpaa_driver *drv;
	FILE *svr_file = NULL;
	unsigned int svr_ver;
	int probe_all = rte_dpaa_bus.bus.conf.scan_mode != RTE_BUS_SCAN_ALLOWLIST;
	static int process_once;

	/* If DPAA bus is not present nothing needs to be done */
	if (!rte_dpaa_bus.detected)
		return 0;

	/* Device list creation is only done once */
	if (!process_once) {
		rte_dpaa_bus_dev_build();
		/* One time load of Qman/Bman drivers */
		ret = qman_global_init();
		if (ret) {
			DPAA_BUS_ERR("QMAN initialization failed: %d", ret);
			return ret;
		}
		ret = bman_global_init();
		if (ret) {
			DPAA_BUS_ERR("BMAN initialization failed: %d", ret);
			return ret;
		}
	}
	process_once = 1;

	/* If no device present on DPAA bus nothing needs to be done */
	if (TAILQ_EMPTY(&rte_dpaa_bus.device_list))
		return 0;

	svr_file = fopen(DPAA_SOC_ID_FILE, "r");
	if (svr_file) {
		if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
			dpaa_svr_family = svr_ver & SVR_MASK;
		fclose(svr_file);
	}

	TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
		if (dev->device_type == FSL_DPAA_ETH) {
			ret = rte_dpaa_setup_intr(&dev->intr_handle);
			if (ret)
				DPAA_BUS_ERR("Error setting up interrupt.\n");
		}
	}

	/* And initialize the PA->VA translation table */
	dpaax_iova_table_populate();

	/* For each registered driver, and device, call the driver->probe */
	TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
		TAILQ_FOREACH(drv, &rte_dpaa_bus.driver_list, next) {
			ret = rte_dpaa_device_match(drv, dev);
			if (ret)
				continue;

			if (rte_dev_is_probed(&dev->device))
				continue;

			if (!drv->probe ||
			    (dev->device.devargs &&
			     dev->device.devargs->policy == RTE_DEV_BLOCKED))
				continue;

			if (probe_all ||
			    (dev->device.devargs &&
			     dev->device.devargs->policy == RTE_DEV_ALLOWED)) {
				ret = drv->probe(drv, dev);
				if (ret) {
					DPAA_BUS_ERR("unable to probe:%s",
						     dev->name);
				} else {
					dev->driver = drv;
					dev->device.driver = &drv->driver;
				}
			}
			break;
		}
	}

	/* Register DPAA mempool ops only if any DPAA device has
	 * been detected.
	 */
	rte_mbuf_set_platform_mempool_ops(DPAA_MEMPOOL_OPS_NAME);

	return 0;
}

static struct rte_device *
rte_dpaa_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
		     const void *data)
{
	struct rte_dpaa_device *dev;
	const struct rte_dpaa_device *dstart;

	/* find_device is called with 'data' as an opaque object - just call
	 * cmp with this and each device object on bus.
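	 * The first matching device is returned; when 'start' is non-NULL,
	 * iteration resumes from the device that follows it in the list.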
	 */

	if (start != NULL) {
		dstart = RTE_DEV_TO_DPAA_CONST(start);
		dev = TAILQ_NEXT(dstart, next);
	} else {
		dev = TAILQ_FIRST(&rte_dpaa_bus.device_list);
	}

	while (dev != NULL) {
		if (cmp(&dev->device, data) == 0) {
			DPAA_BUS_DEBUG("Found dev=(%s)\n", dev->device.name);
			return &dev->device;
		}
		dev = TAILQ_NEXT(dev, next);
	}

	DPAA_BUS_DEBUG("Unable to find any device\n");
	return NULL;
}

/*
 * Get the iommu class of DPAA devices on the bus.
 */
static enum rte_iova_mode
rte_dpaa_get_iommu_class(void)
{
	if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
	    (access(DPAA_DEV_PATH2, F_OK) != 0)) {
		return RTE_IOVA_DC;
	}
	return RTE_IOVA_PA;
}

static int
dpaa_bus_plug(struct rte_device *dev __rte_unused)
{
	/* No operation is performed while plugging the device */
	return 0;
}

static int
dpaa_bus_unplug(struct rte_device *dev __rte_unused)
{
	/* No operation is performed while unplugging the device */
	return 0;
}

static void *
dpaa_bus_dev_iterate(const void *start, const char *str,
		     const struct rte_dev_iterator *it __rte_unused)
{
	const struct rte_dpaa_device *dstart;
	struct rte_dpaa_device *dev;
	char *dup, *dev_name = NULL;

	if (str == NULL) {
		DPAA_BUS_DEBUG("No device string");
		return NULL;
	}

	/* Expectation is that device would be name=device_name */
	if (strncmp(str, "name=", 5) != 0) {
		DPAA_BUS_DEBUG("Invalid device string (%s)\n", str);
		return NULL;
	}

	/* Now that name=device_name format is available, split */
	dup = strdup(str);
	if (dup == NULL)
		return NULL;
	dev_name = dup + strlen("name=");

	if (start != NULL) {
		dstart = RTE_DEV_TO_DPAA_CONST(start);
		dev = TAILQ_NEXT(dstart, next);
	} else {
		dev = TAILQ_FIRST(&rte_dpaa_bus.device_list);
	}

	while (dev != NULL) {
		if (strcmp(dev->device.name, dev_name) == 0) {
			free(dup);
			return &dev->device;
		}
		dev = TAILQ_NEXT(dev, next);
	}

	free(dup);
	return NULL;
}

static struct rte_dpaa_bus rte_dpaa_bus = {
	.bus = {
		.scan = rte_dpaa_bus_scan,
		.probe = rte_dpaa_bus_probe,
		.parse = rte_dpaa_bus_parse,
		.find_device = rte_dpaa_find_device,
		.get_iommu_class = rte_dpaa_get_iommu_class,
		.plug = dpaa_bus_plug,
		.unplug = dpaa_bus_unplug,
		.dev_iterate = dpaa_bus_dev_iterate,
	},
	.device_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.device_list),
	.driver_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.driver_list),
	.device_count = 0,
};

RTE_REGISTER_BUS(FSL_DPAA_BUS_NAME, rte_dpaa_bus.bus);
RTE_LOG_REGISTER(dpaa_logtype_bus, bus.dpaa, NOTICE);
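
/*
 * Usage sketch (not part of the driver, assumes the public API declared in
 * rte_dpaa_bus.h): a minimal example of how a PMD typically hooks into this
 * bus. The names dummy_dpaa_probe, dummy_dpaa_remove and dummy_dpaa_drv are
 * hypothetical; the flow mirrors what rte_dpaa_bus_probe() above expects -
 * the driver registers itself at load time and, in its probe callback,
 * initialises a portal for the calling lcore before issuing any QMAN/BMAN
 * operations.
 *
 *	static int dummy_dpaa_probe(struct rte_dpaa_driver *drv,
 *				    struct rte_dpaa_device *dev)
 *	{
 *		RTE_SET_USED(drv);
 *		RTE_SET_USED(dev);
 *		// Invoked from rte_dpaa_bus_probe() when drv->drv_type
 *		// matches dev->device_type and the devargs policy allows it.
 *		// Passing (void *)1 selects the main lcore, as handled in
 *		// rte_dpaa_portal_init() above.
 *		return rte_dpaa_portal_init((void *)1);
 *	}
 *
 *	static int dummy_dpaa_remove(struct rte_dpaa_device *dev)
 *	{
 *		RTE_SET_USED(dev);
 *		return 0;
 *	}
 *
 *	static struct rte_dpaa_driver dummy_dpaa_drv = {
 *		.drv_type = FSL_DPAA_ETH,
 *		.probe = dummy_dpaa_probe,
 *		.remove = dummy_dpaa_remove,
 *	};
 *
 *	RTE_PMD_REGISTER_DPAA(dummy_dpaa, dummy_dpaa_drv);
 */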