/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

/*
 * NVMe transport abstraction
 */

#include "nvme_internal.h"
#include "spdk/queue.h"

#define SPDK_MAX_NUM_OF_TRANSPORTS 16

struct spdk_nvme_transport {
	struct spdk_nvme_transport_ops		ops;
	TAILQ_ENTRY(spdk_nvme_transport)	link;
};

TAILQ_HEAD(nvme_transport_list, spdk_nvme_transport) g_spdk_nvme_transports =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvme_transports);

static struct spdk_nvme_transport g_transports[SPDK_MAX_NUM_OF_TRANSPORTS] = {};
static int g_current_transport_index = 0;

struct spdk_nvme_transport_opts g_spdk_nvme_transport_opts = {
	.rdma_srq_size = 0,
	.rdma_max_cq_size = 0,
	.rdma_cm_event_timeout_ms = 1000
};

const struct spdk_nvme_transport *
nvme_get_first_transport(void)
{
	return TAILQ_FIRST(&g_spdk_nvme_transports);
}

const struct spdk_nvme_transport *
nvme_get_next_transport(const struct spdk_nvme_transport *transport)
{
	return TAILQ_NEXT(transport, link);
}

/*
 * Unfortunately, due to NVMe PCIe multiprocess support, we cannot store the
 * transport object in either the controller struct or the admin qpair. This means
 * that many admin-related transport calls have to go through nvme_get_transport
 * to determine which functions to invoke.
 * In the I/O path, we can store the transport struct in the I/O
 * qpairs to avoid taking this performance hit.
 */
const struct spdk_nvme_transport *
nvme_get_transport(const char *transport_name)
{
	struct spdk_nvme_transport *registered_transport;

	TAILQ_FOREACH(registered_transport, &g_spdk_nvme_transports, link) {
		if (strcasecmp(transport_name, registered_transport->ops.name) == 0) {
			return registered_transport;
		}
	}

	return NULL;
}

bool
spdk_nvme_transport_available(enum spdk_nvme_transport_type trtype)
{
	return nvme_get_transport(spdk_nvme_transport_id_trtype_str(trtype)) != NULL;
}

bool
spdk_nvme_transport_available_by_name(const char *transport_name)
{
	return nvme_get_transport(transport_name) != NULL;
}
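/*
 * Illustrative sketch (hypothetical caller, not code from this file): when
 * the transport name comes from user configuration, availability can be
 * checked before attempting to probe:
 *
 *	if (!spdk_nvme_transport_available_by_name("RDMA")) {
 *		SPDK_ERRLOG("RDMA transport is not available\n");
 *		return -ENODEV;
 *	}
 */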
void
spdk_nvme_transport_register(const struct spdk_nvme_transport_ops *ops)
{
	struct spdk_nvme_transport *new_transport;

	if (nvme_get_transport(ops->name)) {
		SPDK_ERRLOG("Double registering NVMe transport %s is prohibited.\n", ops->name);
		assert(false);
	}

	if (g_current_transport_index == SPDK_MAX_NUM_OF_TRANSPORTS) {
		SPDK_ERRLOG("Unable to register new NVMe transport.\n");
		assert(false);
		return;
	}

	new_transport = &g_transports[g_current_transport_index++];

	new_transport->ops = *ops;
	TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, new_transport, link);
}
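/*
 * Illustrative sketch: transport modules normally register themselves at
 * load time via the SPDK_NVME_TRANSPORT_REGISTER() constructor macro rather
 * than by calling spdk_nvme_transport_register() directly. A hypothetical
 * transport might look like:
 *
 *	static const struct spdk_nvme_transport_ops my_ops = {
 *		.name = "MYTRANSPORT",
 *		.type = SPDK_NVME_TRANSPORT_CUSTOM,
 *		.ctrlr_construct = my_ctrlr_construct,
 *		... remaining callbacks ...
 *	};
 *	SPDK_NVME_TRANSPORT_REGISTER(my, &my_ops);
 */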
struct spdk_nvme_ctrlr *
nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
			       const struct spdk_nvme_ctrlr_opts *opts,
			       void *devhandle)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(trid->trstring);
	struct spdk_nvme_ctrlr *ctrlr;

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", trid->trstring);
		return NULL;
	}

	ctrlr = transport->ops.ctrlr_construct(trid, opts, devhandle);

	return ctrlr;
}

int
nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
			  bool direct_connect)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(probe_ctx->trid.trstring);

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", probe_ctx->trid.trstring);
		return -ENOENT;
	}

	return transport->ops.ctrlr_scan(probe_ctx, direct_connect);
}

int
nvme_transport_ctrlr_scan_attached(struct spdk_nvme_probe_ctx *probe_ctx)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(probe_ctx->trid.trstring);

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", probe_ctx->trid.trstring);
		return -ENOENT;
	}

	if (transport->ops.ctrlr_scan_attached != NULL) {
		return transport->ops.ctrlr_scan_attached(probe_ctx);
	}

	SPDK_ERRLOG("Transport %s does not support ctrlr_scan_attached callback\n",
		    probe_ctx->trid.trstring);
	return -ENOTSUP;
}

int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_destruct(ctrlr);
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_enable(ctrlr);
}

int
nvme_transport_ctrlr_enable_interrupts(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_enable_interrupts != NULL) {
		return transport->ops.ctrlr_enable_interrupts(ctrlr);
	}

	return -ENOTSUP;
}

int
nvme_transport_ctrlr_ready(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_ready) {
		return transport->ops.ctrlr_ready(ctrlr);
	}

	return 0;
}

int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_8(ctrlr, offset, value);
}
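/*
 * Illustrative sketch (an assumed caller, not code from this file): the
 * synchronous wrappers above take register offsets from spdk/nvme_spec.h,
 * e.g. a controller-status read:
 *
 *	union spdk_nvme_csts_register csts;
 *
 *	if (nvme_transport_ctrlr_get_reg_4(ctrlr,
 *					   offsetof(struct spdk_nvme_registers, csts.raw),
 *					   &csts.raw) == 0 && csts.bits.rdy) {
 *		... controller is ready ...
 *	}
 *
 * The helpers below emulate the *_async variants for transports that only
 * implement synchronous register access: the sync op runs immediately and a
 * completion is queued on ctrlr->register_operations for later delivery.
 */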
static int
nvme_queue_register_operation_completion(struct spdk_nvme_ctrlr *ctrlr, uint64_t value,
		spdk_nvme_reg_cb cb_fn, void *cb_ctx)
{
	struct nvme_register_completion *ctx;

	ctx = spdk_zmalloc(sizeof(*ctx), 0, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
	if (ctx == NULL) {
		return -ENOMEM;
	}

	ctx->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	ctx->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	ctx->cb_fn = cb_fn;
	ctx->cb_ctx = cb_ctx;
	ctx->value = value;
	ctx->pid = getpid();

	nvme_ctrlr_lock(ctrlr);
	STAILQ_INSERT_TAIL(&ctrlr->register_operations, ctx, stailq);
	nvme_ctrlr_unlock(ctrlr);

	return 0;
}

int
nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_set_reg_4_async == NULL) {
		rc = transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_set_reg_4_async(ctrlr, offset, value, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_set_reg_8_async == NULL) {
		rc = transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_set_reg_8_async(ctrlr, offset, value, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	uint32_t value;
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_reg_4_async == NULL) {
		rc = transport->ops.ctrlr_get_reg_4(ctrlr, offset, &value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_get_reg_4_async(ctrlr, offset, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	uint64_t value;
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_reg_8_async == NULL) {
		rc = transport->ops.ctrlr_get_reg_8(ctrlr, offset, &value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_get_reg_8_async(ctrlr, offset, cb_fn, cb_arg);
}

uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_xfer_size(ctrlr);
}

uint16_t
nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_sges(ctrlr);
}

int
nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_reserve_cmb != NULL) {
		return transport->ops.ctrlr_reserve_cmb(ctrlr);
	}

	return -ENOTSUP;
}

void *
nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_map_cmb != NULL) {
		return transport->ops.ctrlr_map_cmb(ctrlr, size);
	}

	return NULL;
}

int
nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_unmap_cmb != NULL) {
		return transport->ops.ctrlr_unmap_cmb(ctrlr);
	}

	return 0;
}
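/*
 * Illustrative sketch: the CMB wrappers above are normally reached through
 * the public API. A hypothetical caller that places data in the controller
 * memory buffer (assuming the transport supports it):
 *
 *	size_t size;
 *	void *buf;
 *
 *	if (spdk_nvme_ctrlr_reserve_cmb(ctrlr) == 0) {
 *		buf = spdk_nvme_ctrlr_map_cmb(ctrlr, &size);
 *		if (buf != NULL) {
 *			... use up to size bytes at buf ...
 *			spdk_nvme_ctrlr_unmap_cmb(ctrlr);
 *		}
 *	}
 */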
int
nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_enable_pmr != NULL) {
		return transport->ops.ctrlr_enable_pmr(ctrlr);
	}

	return -ENOSYS;
}

int
nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_disable_pmr != NULL) {
		return transport->ops.ctrlr_disable_pmr(ctrlr);
	}

	return -ENOSYS;
}

void *
nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_map_pmr != NULL) {
		return transport->ops.ctrlr_map_pmr(ctrlr, size);
	}

	return NULL;
}

int
nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_unmap_pmr != NULL) {
		return transport->ops.ctrlr_unmap_pmr(ctrlr);
	}

	return -ENOSYS;
}

struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
				     const struct spdk_nvme_io_qpair_opts *opts)
{
	struct spdk_nvme_qpair *qpair;
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	qpair = transport->ops.ctrlr_create_io_qpair(ctrlr, qid, opts);
	if (qpair != NULL && !nvme_qpair_is_admin_queue(qpair)) {
		qpair->transport = transport;
	}

	return qpair;
}

void
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);

	/* Do not rely on qpair->transport. For multi-process cases, a foreign process may delete
	 * the IO qpair, in which case the transport object would be invalid (each process has its
	 * own unique transport objects since they contain function pointers). So we look up the
	 * transport object in the delete_io_qpair case.
	 */
	rc = transport->ops.ctrlr_delete_io_qpair(ctrlr, qpair);
	if (rc != 0) {
		SPDK_ERRLOG("transport %s returned non-zero for ctrlr_delete_io_qpair op\n",
			    transport->ops.name);
		assert(false);
	}
}

static void
nvme_transport_connect_qpair_fail(struct spdk_nvme_qpair *qpair, void *unused)
{
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;

	/* If the qpair was unable to reconnect, restore the original failure reason */
	qpair->transport_failure_reason = qpair->last_transport_failure_reason;
	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
}

int
nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (!nvme_qpair_is_admin_queue(qpair) && qpair->transport == NULL) {
		qpair->transport = transport;
	}

	qpair->last_transport_failure_reason = qpair->transport_failure_reason;
	qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTING);
	rc = transport->ops.ctrlr_connect_qpair(ctrlr, qpair);
	if (rc != 0) {
		goto err;
	}

	if (qpair->poll_group) {
		rc = nvme_poll_group_connect_qpair(qpair);
		if (rc) {
			goto err;
		}
	}

	if (!qpair->async) {
		/* Busy wait until the qpair exits the connecting state */
		while (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
			if (qpair->poll_group && spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
				rc = spdk_nvme_poll_group_process_completions(
					     qpair->poll_group->group, 0,
					     nvme_transport_connect_qpair_fail);
			} else {
				rc = spdk_nvme_qpair_process_completions(qpair, 0);
			}

			if (rc < 0) {
				goto err;
			}
		}
	}

	return 0;
err:
	nvme_transport_connect_qpair_fail(qpair, NULL);
	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING) {
		assert(qpair->async == true);
		/* Let the caller poll the qpair until it is actually disconnected. */
		return 0;
	}

	return rc;
}
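/*
 * Illustrative sketch (assumptions: an application-level caller and an I/O
 * qpair created with opts.create_only = true): the connect path above is
 * reached from the public API and, for an async qpair, polled to completion:
 *
 *	rc = spdk_nvme_ctrlr_connect_io_qpair(ctrlr, qpair);
 *	if (rc == 0) {
 *		... poll spdk_nvme_qpair_process_completions() until I/O
 *		    submission succeeds or the qpair reports a failure ...
 *	}
 */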
void
nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING ||
	    nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED) {
		return;
	}

	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTING);
	assert(transport != NULL);

	if (qpair->poll_group && (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr))) {
		nvme_poll_group_disconnect_qpair(qpair);
	}

	transport->ops.ctrlr_disconnect_qpair(ctrlr, qpair);
}

int
nvme_transport_qpair_get_fd(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			    struct spdk_event_handler_opts *opts)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.qpair_get_fd != NULL) {
		return transport->ops.qpair_get_fd(qpair, opts);
	}

	return -ENOTSUP;
}

void
nvme_transport_ctrlr_disconnect_qpair_done(struct spdk_nvme_qpair *qpair)
{
	if (qpair->active_proc == nvme_ctrlr_get_current_process(qpair->ctrlr) ||
	    nvme_qpair_is_admin_queue(qpair)) {
		nvme_qpair_abort_all_queued_reqs(qpair);
	}
	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);

	/* In interrupt mode, qpairs that were added to a poll group need an event
	 * for the disconnected-qpair handling to kick in.
	 */
	if (qpair->poll_group) {
		nvme_poll_group_write_disconnect_qpair_fd(qpair->poll_group->group);
	}
}

int
nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
					struct spdk_memory_domain **domains, int array_size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_memory_domains) {
		return transport->ops.ctrlr_get_memory_domains(ctrlr, domains, array_size);
	}

	return 0;
}

void
nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		qpair->transport->ops.qpair_abort_reqs(qpair, qpair->abort_dnr);
	} else {
		transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
		assert(transport != NULL);
		transport->ops.qpair_abort_reqs(qpair, qpair->abort_dnr);
	}
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_reset(qpair);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_reset(qpair);
}

int
nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_submit_request(qpair, req);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_submit_request(qpair, req);
}

int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_process_completions(qpair, max_completions);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_process_completions(qpair, max_completions);
}
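/*
 * Note on the qpair_* wrappers above: they run on the I/O hot path, so they
 * use the transport pointer cached in the qpair by
 * nvme_transport_ctrlr_create_io_qpair() and fall back to the name lookup
 * only for admin qpairs. An illustrative polling loop at the public API
 * level (hypothetical caller):
 *
 *	int32_t num = spdk_nvme_qpair_process_completions(qpair, 0);
 *	if (num < 0) {
 *		... transport-level error, e.g. the qpair disconnected ...
 *	}
 */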
int
nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
				      int (*iter_fn)(struct nvme_request *req, void *arg),
				      void *arg)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
}

int
nvme_transport_qpair_authenticate(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport;

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	if (transport->ops.qpair_authenticate == NULL) {
		return -ENOTSUP;
	}

	return transport->ops.qpair_authenticate(qpair);
}

void
nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(qpair->ctrlr->trid.trstring);

	assert(transport != NULL);
	transport->ops.admin_qpair_abort_aers(qpair);
}

struct spdk_nvme_transport_poll_group *
nvme_transport_poll_group_create(const struct spdk_nvme_transport *transport)
{
	struct spdk_nvme_transport_poll_group *group = NULL;

	group = transport->ops.poll_group_create();
	if (group) {
		group->transport = transport;
		STAILQ_INIT(&group->connected_qpairs);
		STAILQ_INIT(&group->disconnected_qpairs);
		group->num_connected_qpairs = 0;
	}

	return group;
}

struct spdk_nvme_transport_poll_group *
nvme_transport_qpair_get_optimal_poll_group(const struct spdk_nvme_transport *transport,
		struct spdk_nvme_qpair *qpair)
{
	if (transport->ops.qpair_get_optimal_poll_group) {
		return transport->ops.qpair_get_optimal_poll_group(qpair);
	} else {
		return NULL;
	}
}

int
nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
			      struct spdk_nvme_qpair *qpair)
{
	int rc;

	rc = tgroup->transport->ops.poll_group_add(tgroup, qpair);
	if (rc == 0) {
		qpair->poll_group = tgroup;
		assert(nvme_qpair_get_state(qpair) < NVME_QPAIR_CONNECTED);
		qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
		STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
	}

	return rc;
}

int
nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
				 struct spdk_nvme_qpair *qpair)
{
	int rc __attribute__((unused));

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		return -EINVAL;
	} else if (qpair->poll_group_tailq_head != &tgroup->disconnected_qpairs) {
		return -ENOENT;
	}

	rc = tgroup->transport->ops.poll_group_remove(tgroup, qpair);
	assert(rc == 0);

	STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}
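/*
 * Illustrative sketch (public API level, hypothetical caller): qpairs are
 * added to a poll group while still disconnected (note the state assert in
 * nvme_transport_poll_group_add() above), then connected and polled as a
 * group:
 *
 *	struct spdk_nvme_poll_group *pg = spdk_nvme_poll_group_create(NULL, NULL);
 *
 *	if (pg != NULL && spdk_nvme_poll_group_add(pg, qpair) == 0) {
 *		... connect the qpair, then periodically call
 *		    spdk_nvme_poll_group_process_completions() ...
 *	}
 */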
int64_t
nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	return tgroup->transport->ops.poll_group_process_completions(tgroup, completions_per_qpair,
			disconnected_qpair_cb);
}

void
nvme_transport_poll_group_check_disconnected_qpairs(struct spdk_nvme_transport_poll_group *tgroup,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	tgroup->transport->ops.poll_group_check_disconnected_qpairs(tgroup, disconnected_qpair_cb);
}

int
nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
{
	return tgroup->transport->ops.poll_group_destroy(tgroup);
}

int
nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc __attribute__((unused));

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		rc = tgroup->transport->ops.poll_group_disconnect_qpair(qpair);
		assert(rc == 0);

		qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
		STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
		assert(tgroup->num_connected_qpairs > 0);
		tgroup->num_connected_qpairs--;
		STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);

		return 0;
	}

	return -EINVAL;
}

int
nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc;

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		rc = tgroup->transport->ops.poll_group_connect_qpair(qpair);
		if (rc == 0) {
			qpair->poll_group_tailq_head = &tgroup->connected_qpairs;
			STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair,
				      poll_group_stailq);
			STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
			tgroup->num_connected_qpairs++;
		}

		return rc == -EINPROGRESS ? 0 : rc;
	}

	return -EINVAL;
}
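/*
 * Illustrative sketch: per-transport poll group statistics are optional, so
 * callers of the helpers below must tolerate -ENOTSUP:
 *
 *	struct spdk_nvme_transport_poll_group_stat *stat;
 *
 *	if (nvme_transport_poll_group_get_stats(tgroup, &stat) == 0) {
 *		... report stat ...
 *		nvme_transport_poll_group_free_stats(tgroup, stat);
 *	}
 */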
int
nvme_transport_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
				    struct spdk_nvme_transport_poll_group_stat **stats)
{
	if (tgroup->transport->ops.poll_group_get_stats) {
		return tgroup->transport->ops.poll_group_get_stats(tgroup, stats);
	}

	return -ENOTSUP;
}

void
nvme_transport_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
				     struct spdk_nvme_transport_poll_group_stat *stats)
{
	if (tgroup->transport->ops.poll_group_free_stats) {
		tgroup->transport->ops.poll_group_free_stats(tgroup, stats);
	}
}

spdk_nvme_transport_type_t
nvme_transport_get_trtype(const struct spdk_nvme_transport *transport)
{
	return transport->ops.type;
}

void
spdk_nvme_transport_get_opts(struct spdk_nvme_transport_opts *opts, size_t opts_size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL.\n");
		return;
	}

	if (opts_size == 0) {
		SPDK_ERRLOG("opts_size should not be zero.\n");
		return;
	}

	opts->opts_size = opts_size;

#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvme_transport_opts, field) + sizeof(opts->field) <= opts_size) { \
		opts->field = g_spdk_nvme_transport_opts.field; \
	}

	SET_FIELD(rdma_srq_size);
	SET_FIELD(rdma_max_cq_size);
	SET_FIELD(rdma_cm_event_timeout_ms);

	/* Do not remove this statement. Always update it when adding a new field,
	 * and do not forget to add the SET_FIELD statement for the new field. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_transport_opts) == 24, "Incorrect size");

#undef SET_FIELD
}

int
spdk_nvme_transport_set_opts(const struct spdk_nvme_transport_opts *opts, size_t opts_size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL.\n");
		return -EINVAL;
	}

	if (opts_size == 0) {
		SPDK_ERRLOG("opts_size should not be zero.\n");
		return -EINVAL;
	}

#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvme_transport_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
		g_spdk_nvme_transport_opts.field = opts->field; \
	}

	SET_FIELD(rdma_srq_size);
	SET_FIELD(rdma_max_cq_size);
	SET_FIELD(rdma_cm_event_timeout_ms);

	g_spdk_nvme_transport_opts.opts_size = opts->opts_size;

#undef SET_FIELD

	return 0;
}

volatile struct spdk_nvme_registers *
spdk_nvme_ctrlr_get_registers(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	if (transport == NULL) {
		/* Transport does not exist. */
		return NULL;
	}

	if (transport->ops.ctrlr_get_registers) {
		return transport->ops.ctrlr_get_registers(ctrlr);
	}

	return NULL;
}
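/*
 * Illustrative sketch of the read-modify-write pattern that
 * spdk_nvme_transport_get_opts()/spdk_nvme_transport_set_opts() above are
 * designed for; opts_size keeps the struct extensible across ABI versions:
 *
 *	struct spdk_nvme_transport_opts opts;
 *
 *	spdk_nvme_transport_get_opts(&opts, sizeof(opts));
 *	opts.rdma_srq_size = 1024;
 *	spdk_nvme_transport_set_opts(&opts, sizeof(opts));
 */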