/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 RehiveTech. All rights reserved.
 */

#include <string.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <sys/queue.h>

#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_bus.h>
#include <rte_common.h>
#include <rte_devargs.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_errno.h>

#include "rte_bus_vdev.h"
#include "vdev_logs.h"
#include "vdev_private.h"

#define VDEV_MP_KEY	"bus_vdev_mp"

/* Forward declare to access virtual bus name */
static struct rte_bus rte_vdev_bus;

/** Double linked list of virtual device drivers. */
TAILQ_HEAD(vdev_device_list, rte_vdev_device);

static struct vdev_device_list vdev_device_list =
	TAILQ_HEAD_INITIALIZER(vdev_device_list);
/* The lock needs to be recursive because a vdev can manage another vdev. */
static rte_spinlock_recursive_t vdev_device_list_lock =
	RTE_SPINLOCK_RECURSIVE_INITIALIZER;

static struct vdev_driver_list vdev_driver_list =
	TAILQ_HEAD_INITIALIZER(vdev_driver_list);

struct vdev_custom_scan {
	TAILQ_ENTRY(vdev_custom_scan) next;
	rte_vdev_scan_callback callback;
	void *user_arg;
};
TAILQ_HEAD(vdev_custom_scans, vdev_custom_scan);
static struct vdev_custom_scans vdev_custom_scans =
	TAILQ_HEAD_INITIALIZER(vdev_custom_scans);
static rte_spinlock_t vdev_custom_scan_lock = RTE_SPINLOCK_INITIALIZER;

/* register a driver */
void
rte_vdev_register(struct rte_vdev_driver *driver)
{
	TAILQ_INSERT_TAIL(&vdev_driver_list, driver, next);
}

/* unregister a driver */
void
rte_vdev_unregister(struct rte_vdev_driver *driver)
{
	TAILQ_REMOVE(&vdev_driver_list, driver, next);
}

int
rte_vdev_add_custom_scan(rte_vdev_scan_callback callback, void *user_arg)
{
	struct vdev_custom_scan *custom_scan;

	rte_spinlock_lock(&vdev_custom_scan_lock);

	/* check if already registered */
	TAILQ_FOREACH(custom_scan, &vdev_custom_scans, next) {
		if (custom_scan->callback == callback &&
				custom_scan->user_arg == user_arg)
			break;
	}

	if (custom_scan == NULL) {
		custom_scan = malloc(sizeof(struct vdev_custom_scan));
		if (custom_scan != NULL) {
			custom_scan->callback = callback;
			custom_scan->user_arg = user_arg;
			TAILQ_INSERT_TAIL(&vdev_custom_scans,
					custom_scan, next);
		}
	}

	rte_spinlock_unlock(&vdev_custom_scan_lock);

	return (custom_scan == NULL) ? -1 : 0;
}

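/*
 * Illustrative sketch (not part of this driver): a custom scan callback
 * registered through rte_vdev_add_custom_scan() is expected to add devargs
 * entries for the vdevs it wants picked up on the next bus scan, by filling
 * a struct rte_devargs and calling rte_devargs_insert(), as described in the
 * comment inside vdev_scan() below. The callback name and the "net_null0"
 * device name used here are examples only, assuming the
 * rte_vdev_scan_callback signature declared in rte_bus_vdev.h.
 *
 *	static void
 *	my_vdev_scan_cb(void *user_arg)
 *	{
 *		struct rte_devargs *devargs = calloc(1, sizeof(*devargs));
 *
 *		if (devargs == NULL)
 *			return;
 *		devargs->bus = rte_bus_find_by_name("vdev");
 *		devargs->type = RTE_DEVTYPE_VIRTUAL;
 *		devargs->policy = RTE_DEV_WHITELISTED;
 *		strlcpy(devargs->name, (const char *)user_arg,
 *			sizeof(devargs->name));
 *		devargs->args = strdup("");
 *		rte_devargs_insert(&devargs);
 *	}
 *
 * registered for example with:
 *	rte_vdev_add_custom_scan(my_vdev_scan_cb, (void *)"net_null0");
 */
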
int
rte_vdev_remove_custom_scan(rte_vdev_scan_callback callback, void *user_arg)
{
	struct vdev_custom_scan *custom_scan, *tmp_scan;

	rte_spinlock_lock(&vdev_custom_scan_lock);
	TAILQ_FOREACH_SAFE(custom_scan, &vdev_custom_scans, next, tmp_scan) {
		if (custom_scan->callback != callback ||
				(custom_scan->user_arg != (void *)-1 &&
				custom_scan->user_arg != user_arg))
			continue;
		TAILQ_REMOVE(&vdev_custom_scans, custom_scan, next);
		free(custom_scan);
	}
	rte_spinlock_unlock(&vdev_custom_scan_lock);

	return 0;
}

static int
vdev_parse(const char *name, void *addr)
{
	struct rte_vdev_driver **out = addr;
	struct rte_vdev_driver *driver = NULL;

	TAILQ_FOREACH(driver, &vdev_driver_list, next) {
		if (strncmp(driver->driver.name, name,
				strlen(driver->driver.name)) == 0)
			break;
		if (driver->driver.alias &&
				strncmp(driver->driver.alias, name,
					strlen(driver->driver.alias)) == 0)
			break;
	}
	if (driver != NULL &&
	    addr != NULL)
		*out = driver;
	return driver == NULL;
}

static int
vdev_dma_map(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
{
	struct rte_vdev_device *vdev = RTE_DEV_TO_VDEV(dev);
	const struct rte_vdev_driver *driver;

	if (!vdev) {
		rte_errno = EINVAL;
		return -1;
	}

	if (!vdev->device.driver) {
		VDEV_LOG(DEBUG, "no driver attached to device %s", dev->name);
		return 1;
	}

	driver = container_of(vdev->device.driver,
			const struct rte_vdev_driver, driver);

	if (driver->dma_map)
		return driver->dma_map(vdev, addr, iova, len);

	return 0;
}

static int
vdev_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
{
	struct rte_vdev_device *vdev = RTE_DEV_TO_VDEV(dev);
	const struct rte_vdev_driver *driver;

	if (!vdev) {
		rte_errno = EINVAL;
		return -1;
	}

	if (!vdev->device.driver) {
		VDEV_LOG(DEBUG, "no driver attached to device %s", dev->name);
		return 1;
	}

	driver = container_of(vdev->device.driver,
			const struct rte_vdev_driver, driver);

	if (driver->dma_unmap)
		return driver->dma_unmap(vdev, addr, iova, len);

	return 0;
}

static int
vdev_probe_all_drivers(struct rte_vdev_device *dev)
{
	const char *name;
	struct rte_vdev_driver *driver;
	int ret;

	if (rte_dev_is_probed(&dev->device))
		return -EEXIST;

	name = rte_vdev_device_name(dev);
	VDEV_LOG(DEBUG, "Search driver to probe device %s", name);

	if (vdev_parse(name, &driver))
		return -1;
	ret = driver->probe(dev);
	if (ret == 0)
		dev->device.driver = &driver->driver;
	return ret;
}

/* The caller shall be responsible for thread-safety */
static struct rte_vdev_device *
find_vdev(const char *name)
{
	struct rte_vdev_device *dev;

	if (!name)
		return NULL;

	TAILQ_FOREACH(dev, &vdev_device_list, next) {
		const char *devname = rte_vdev_device_name(dev);

		if (!strcmp(devname, name))
			return dev;
	}

	return NULL;
}

static struct rte_devargs *
alloc_devargs(const char *name, const char *args)
{
	struct rte_devargs *devargs;
	int ret;

	devargs = calloc(1, sizeof(*devargs));
	if (!devargs)
		return NULL;

	devargs->bus = &rte_vdev_bus;
	if (args)
		devargs->args = strdup(args);
	else
		devargs->args = strdup("");

	ret = strlcpy(devargs->name, name, sizeof(devargs->name));
	if (ret < 0 || ret >= (int)sizeof(devargs->name)) {
		free(devargs->args);
		free(devargs);
		return NULL;
	}

	return devargs;
}

static int
insert_vdev(const char *name, const char *args,
		struct rte_vdev_device **p_dev,
		bool init)
{
	struct rte_vdev_device *dev;
	struct rte_devargs *devargs;
	int ret;

	if (name == NULL)
		return -EINVAL;

	devargs = alloc_devargs(name, args);
	if (!devargs)
		return -ENOMEM;

	dev = calloc(1, sizeof(*dev));
	if (!dev) {
		ret = -ENOMEM;
		goto fail;
	}

	dev->device.bus = &rte_vdev_bus;
	dev->device.numa_node = SOCKET_ID_ANY;
	dev->device.name = devargs->name;

	if (find_vdev(name)) {
		/*
		 * A vdev is expected to have only one port.
		 * So there is no reason to try probing again,
		 * even with new arguments.
		 */
		ret = -EEXIST;
		goto fail;
	}

	if (init)
		rte_devargs_insert(&devargs);
	dev->device.devargs = devargs;
	TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);

	if (p_dev)
		*p_dev = dev;

	return 0;
fail:
	free(devargs->args);
	free(devargs);
	free(dev);
	return ret;
}

int
rte_vdev_init(const char *name, const char *args)
{
	struct rte_vdev_device *dev;
	int ret;

	rte_spinlock_recursive_lock(&vdev_device_list_lock);
	ret = insert_vdev(name, args, &dev, true);
	if (ret == 0) {
		ret = vdev_probe_all_drivers(dev);
		if (ret) {
			if (ret > 0)
				VDEV_LOG(ERR, "no driver found for %s", name);
			/* If probing fails, remove the device from the vdev list */
			TAILQ_REMOVE(&vdev_device_list, dev, next);
			rte_devargs_remove(dev->device.devargs);
			free(dev);
		}
	}
	rte_spinlock_recursive_unlock(&vdev_device_list_lock);
	return ret;
}

static int
vdev_remove_driver(struct rte_vdev_device *dev)
{
	const char *name = rte_vdev_device_name(dev);
	const struct rte_vdev_driver *driver;

	if (!dev->device.driver) {
		VDEV_LOG(DEBUG, "no driver attached to device %s", name);
		return 1;
	}

	driver = container_of(dev->device.driver, const struct rte_vdev_driver,
		driver);
	return driver->remove(dev);
}

int
rte_vdev_uninit(const char *name)
{
	struct rte_vdev_device *dev;
	int ret;

	if (name == NULL)
		return -EINVAL;

	rte_spinlock_recursive_lock(&vdev_device_list_lock);

	dev = find_vdev(name);
	if (!dev) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = vdev_remove_driver(dev);
	if (ret)
		goto unlock;

	TAILQ_REMOVE(&vdev_device_list, dev, next);
	rte_devargs_remove(dev->device.devargs);
	free(dev);

unlock:
	rte_spinlock_recursive_unlock(&vdev_device_list_lock);
	return ret;
}

struct vdev_param {
#define VDEV_SCAN_REQ	1
#define VDEV_SCAN_ONE	2
#define VDEV_SCAN_REP	3
	int type;
	int num;
	char name[RTE_DEV_NAME_MAX_LEN];
};

static int vdev_plug(struct rte_device *dev);

/**
 * This function works as the action in both the primary and secondary
 * processes for static vdev discovery when a secondary process is booting.
 *
 * step 1, the secondary process sends a sync request to ask for vdevs in
 *         the primary;
 * step 2, the primary process receives the request and sends the vdevs one
 *         by one;
 * step 3, the primary process sends back a reply indicating how many vdevs
 *         were sent.
 */
static int
vdev_action(const struct rte_mp_msg *mp_msg, const void *peer)
{
	struct rte_vdev_device *dev;
	struct rte_mp_msg mp_resp;
	struct vdev_param *ou = (struct vdev_param *)&mp_resp.param;
	const struct vdev_param *in = (const struct vdev_param *)mp_msg->param;
	const char *devname;
	int num;
	int ret;

	strlcpy(mp_resp.name, VDEV_MP_KEY, sizeof(mp_resp.name));
	mp_resp.len_param = sizeof(*ou);
	mp_resp.num_fds = 0;

	switch (in->type) {
	case VDEV_SCAN_REQ:
		ou->type = VDEV_SCAN_ONE;
		ou->num = 1;
		num = 0;

		rte_spinlock_recursive_lock(&vdev_device_list_lock);
		TAILQ_FOREACH(dev, &vdev_device_list, next) {
			devname = rte_vdev_device_name(dev);
			if (strlen(devname) == 0) {
				VDEV_LOG(INFO, "vdev with no name is not sent");
				continue;
			}
			VDEV_LOG(INFO, "send vdev, %s", devname);
			strlcpy(ou->name, devname, RTE_DEV_NAME_MAX_LEN);
			if (rte_mp_sendmsg(&mp_resp) < 0)
				VDEV_LOG(ERR, "send vdev, %s, failed, %s",
					devname, strerror(rte_errno));
			num++;
		}
		rte_spinlock_recursive_unlock(&vdev_device_list_lock);

		ou->type = VDEV_SCAN_REP;
		ou->num = num;
		if (rte_mp_reply(&mp_resp, peer) < 0)
			VDEV_LOG(ERR, "Failed to reply to a scan request");
		break;
	case VDEV_SCAN_ONE:
		VDEV_LOG(INFO, "receive vdev, %s", in->name);
		ret = insert_vdev(in->name, NULL, NULL, false);
		if (ret == -EEXIST)
			VDEV_LOG(DEBUG, "device already exists, %s", in->name);
		else if (ret < 0)
			VDEV_LOG(ERR, "failed to add vdev, %s", in->name);
		break;
	default:
		VDEV_LOG(ERR, "vdev cannot recognize this message");
	}

	return 0;
}

static int
vdev_scan(void)
{
	struct rte_vdev_device *dev;
	struct rte_devargs *devargs;
	struct vdev_custom_scan *custom_scan;

	if (rte_mp_action_register(VDEV_MP_KEY, vdev_action) < 0 &&
	    rte_errno != EEXIST) {
		/* for primary, unsupported IPC is not an error */
		if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
		    rte_errno == ENOTSUP)
			goto scan;
		VDEV_LOG(ERR, "Failed to add vdev mp action");
		return -1;
	}

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct rte_mp_msg mp_req, *mp_rep;
		struct rte_mp_reply mp_reply;
		struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
		struct vdev_param *req = (struct vdev_param *)mp_req.param;
		struct vdev_param *resp;

		strlcpy(mp_req.name, VDEV_MP_KEY, sizeof(mp_req.name));
		mp_req.len_param = sizeof(*req);
		mp_req.num_fds = 0;
		req->type = VDEV_SCAN_REQ;
		if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
		    mp_reply.nb_received == 1) {
			mp_rep = &mp_reply.msgs[0];
			resp = (struct vdev_param *)mp_rep->param;
			VDEV_LOG(INFO, "Received %d vdevs", resp->num);
			free(mp_reply.msgs);
		} else
			VDEV_LOG(ERR, "Failed to request vdev from primary");

		/* Fall through to allow private vdevs in secondary process */
	}

scan:
	/* call custom scan callbacks if any */
	rte_spinlock_lock(&vdev_custom_scan_lock);
	TAILQ_FOREACH(custom_scan, &vdev_custom_scans, next) {
		if (custom_scan->callback != NULL)
			/*
			 * the callback should update the devargs list
			 * by calling rte_devargs_insert() with
			 *     devargs.bus = rte_bus_find_by_name("vdev");
			 *     devargs.type = RTE_DEVTYPE_VIRTUAL;
			 *     devargs.policy = RTE_DEV_WHITELISTED;
			 */
			custom_scan->callback(custom_scan->user_arg);
	}
	rte_spinlock_unlock(&vdev_custom_scan_lock);

	/* for virtual devices we scan the devargs_list populated via cmdline */
	RTE_EAL_DEVARGS_FOREACH("vdev", devargs) {

		dev = calloc(1, sizeof(*dev));
		if (!dev)
			return -1;

		rte_spinlock_recursive_lock(&vdev_device_list_lock);

		if (find_vdev(devargs->name)) {
			rte_spinlock_recursive_unlock(&vdev_device_list_lock);
			free(dev);
			continue;
		}

		dev->device.bus = &rte_vdev_bus;
		dev->device.devargs = devargs;
		dev->device.numa_node = SOCKET_ID_ANY;
		dev->device.name = devargs->name;

		TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);

		rte_spinlock_recursive_unlock(&vdev_device_list_lock);
	}

	return 0;
}

static int
vdev_probe(void)
{
	struct rte_vdev_device *dev;
	int r, ret = 0;

	/* call the init function for each virtual device */
	TAILQ_FOREACH(dev, &vdev_device_list, next) {
		/* we don't use the vdev lock here, as it's only used in DPDK
		 * initialization; and we don't want to hold such a lock when
		 * we call each driver probe.
		 */

		r = vdev_probe_all_drivers(dev);
		if (r != 0) {
			if (r == -EEXIST)
				continue;
			VDEV_LOG(ERR, "failed to initialize %s device",
				rte_vdev_device_name(dev));
			ret = -1;
		}
	}

	return ret;
}

struct rte_device *
rte_vdev_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
		     const void *data)
{
	const struct rte_vdev_device *vstart;
	struct rte_vdev_device *dev;

	rte_spinlock_recursive_lock(&vdev_device_list_lock);
	if (start != NULL) {
		vstart = RTE_DEV_TO_VDEV_CONST(start);
		dev = TAILQ_NEXT(vstart, next);
	} else {
		dev = TAILQ_FIRST(&vdev_device_list);
	}
	while (dev != NULL) {
		if (cmp(&dev->device, data) == 0)
			break;
		dev = TAILQ_NEXT(dev, next);
	}
	rte_spinlock_recursive_unlock(&vdev_device_list_lock);

	return dev ? &dev->device : NULL;
}

static int
vdev_plug(struct rte_device *dev)
{
	return vdev_probe_all_drivers(RTE_DEV_TO_VDEV(dev));
}

static int
vdev_unplug(struct rte_device *dev)
{
	return rte_vdev_uninit(dev->name);
}

static struct rte_bus rte_vdev_bus = {
	.scan = vdev_scan,
	.probe = vdev_probe,
	.find_device = rte_vdev_find_device,
	.plug = vdev_plug,
	.unplug = vdev_unplug,
	.parse = vdev_parse,
	.dma_map = vdev_dma_map,
	.dma_unmap = vdev_dma_unmap,
	.dev_iterate = rte_vdev_dev_iterate,
};

RTE_REGISTER_BUS(vdev, rte_vdev_bus);
RTE_LOG_REGISTER(vdev_logtype_bus, bus.vdev, NOTICE);
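
/*
 * Usage note (illustrative sketch only, not part of this driver): an
 * application can create and destroy virtual devices at runtime through the
 * rte_vdev_init()/rte_vdev_uninit() APIs implemented above. The device name
 * and driver arguments shown here are examples only (a null PMD instance).
 *
 *	int ret;
 *
 *	ret = rte_vdev_init("net_null0", "size=64,copy=0");
 *	if (ret != 0)
 *		// no driver accepted the device, or it already exists
 *		rte_exit(EXIT_FAILURE, "cannot create net_null0\n");
 *	...
 *	rte_vdev_uninit("net_null0");
 *
 * rte_vdev_init() inserts the devargs, adds the device to the vdev list and
 * probes it with a matching driver; rte_vdev_uninit() invokes the driver's
 * remove callback and frees the device, as implemented in this file.
 */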