/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

/*
 * NVMe transport abstraction
 */

#include "nvme_internal.h"
#include "spdk/queue.h"

#define SPDK_MAX_NUM_OF_TRANSPORTS 16

struct spdk_nvme_transport {
	struct spdk_nvme_transport_ops ops;
	TAILQ_ENTRY(spdk_nvme_transport) link;
};

TAILQ_HEAD(nvme_transport_list, spdk_nvme_transport) g_spdk_nvme_transports =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvme_transports);

static struct spdk_nvme_transport g_transports[SPDK_MAX_NUM_OF_TRANSPORTS] = {};
static int g_current_transport_index = 0;

struct spdk_nvme_transport_opts g_spdk_nvme_transport_opts = {
	.rdma_srq_size = 0,
	.rdma_max_cq_size = 0,
	.rdma_cm_event_timeout_ms = 1000
};

const struct spdk_nvme_transport *
nvme_get_first_transport(void)
{
	return TAILQ_FIRST(&g_spdk_nvme_transports);
}

const struct spdk_nvme_transport *
nvme_get_next_transport(const struct spdk_nvme_transport *transport)
{
	return TAILQ_NEXT(transport, link);
}

/*
 * Unfortunately, due to NVMe PCIe multiprocess support, we cannot store the
 * transport object in either the controller struct or the admin qpair. This means
 * that a lot of admin-related transport calls have to call nvme_get_transport
 * in order to know which functions to call.
 * In the I/O path, we have the ability to store the transport struct in the I/O
 * qpairs to avoid taking a performance hit.
 */
const struct spdk_nvme_transport *
nvme_get_transport(const char *transport_name)
{
	struct spdk_nvme_transport *registered_transport;

	TAILQ_FOREACH(registered_transport, &g_spdk_nvme_transports, link) {
		if (strcasecmp(transport_name, registered_transport->ops.name) == 0) {
			return registered_transport;
		}
	}

	return NULL;
}

bool
spdk_nvme_transport_available(enum spdk_nvme_transport_type trtype)
{
	return nvme_get_transport(spdk_nvme_transport_id_trtype_str(trtype)) != NULL;
}

bool
spdk_nvme_transport_available_by_name(const char *transport_name)
{
	return nvme_get_transport(transport_name) != NULL;
}
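/*
 * Illustrative sketch (added commentary, not upstream code): a transport
 * module typically registers its ops table at load time via the
 * SPDK_NVME_TRANSPORT_REGISTER() constructor macro; the ops name is what
 * nvme_get_transport() later matches case-insensitively. The "my" transport
 * and my_ops table here are hypothetical:
 *
 *	static const struct spdk_nvme_transport_ops my_ops = {
 *		.name = "MYTR",
 *		.type = SPDK_NVME_TRANSPORT_CUSTOM,
 *		.ctrlr_construct = my_ctrlr_construct,
 *		... remaining mandatory callbacks ...
 *	};
 *	SPDK_NVME_TRANSPORT_REGISTER(my, &my_ops);
 */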
void
spdk_nvme_transport_register(const struct spdk_nvme_transport_ops *ops)
{
	struct spdk_nvme_transport *new_transport;

	if (nvme_get_transport(ops->name)) {
		SPDK_ERRLOG("Double registering NVMe transport %s is prohibited.\n", ops->name);
		assert(false);
	}

	if (g_current_transport_index == SPDK_MAX_NUM_OF_TRANSPORTS) {
		SPDK_ERRLOG("Unable to register new NVMe transport.\n");
		assert(false);
		return;
	}
	new_transport = &g_transports[g_current_transport_index++];

	new_transport->ops = *ops;
	TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, new_transport, link);
}

struct spdk_nvme_ctrlr *
nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
			       const struct spdk_nvme_ctrlr_opts *opts,
			       void *devhandle)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(trid->trstring);
	struct spdk_nvme_ctrlr *ctrlr;

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", trid->trstring);
		return NULL;
	}

	ctrlr = transport->ops.ctrlr_construct(trid, opts, devhandle);

	return ctrlr;
}

int
nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
			  bool direct_connect)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(probe_ctx->trid.trstring);

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", probe_ctx->trid.trstring);
		return -ENOENT;
	}

	return transport->ops.ctrlr_scan(probe_ctx, direct_connect);
}

int
nvme_transport_ctrlr_scan_attached(struct spdk_nvme_probe_ctx *probe_ctx)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(probe_ctx->trid.trstring);

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", probe_ctx->trid.trstring);
		return -ENOENT;
	}

	if (transport->ops.ctrlr_scan_attached != NULL) {
		return transport->ops.ctrlr_scan_attached(probe_ctx);
	}

	SPDK_ERRLOG("Transport %s does not support ctrlr_scan_attached callback\n",
		    probe_ctx->trid.trstring);
	return -ENOTSUP;
}

int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_destruct(ctrlr);
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_enable(ctrlr);
}

int
nvme_transport_ctrlr_ready(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_ready) {
		return transport->ops.ctrlr_ready(ctrlr);
	}

	return 0;
}

int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
}
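/*
 * Illustrative sketch (added commentary, not upstream code): registers are
 * addressed by their byte offset within struct spdk_nvme_registers, so a
 * CSTS read through the synchronous accessor below looks like:
 *
 *	union spdk_nvme_csts_register csts;
 *
 *	if (nvme_transport_ctrlr_get_reg_4(ctrlr,
 *			offsetof(struct spdk_nvme_registers, csts.raw),
 *			&csts.raw) == 0) {
 *		... csts.bits.rdy now holds the controller's ready state ...
 *	}
 */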
int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_8(ctrlr, offset, value);
}

static int
nvme_queue_register_operation_completion(struct spdk_nvme_ctrlr *ctrlr, uint64_t value,
		spdk_nvme_reg_cb cb_fn, void *cb_ctx)
{
	struct nvme_register_completion *ctx;

	ctx = spdk_zmalloc(sizeof(*ctx), 0, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
	if (ctx == NULL) {
		return -ENOMEM;
	}

	ctx->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	ctx->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	ctx->cb_fn = cb_fn;
	ctx->cb_ctx = cb_ctx;
	ctx->value = value;
	ctx->pid = getpid();

	nvme_ctrlr_lock(ctrlr);
	STAILQ_INSERT_TAIL(&ctrlr->register_operations, ctx, stailq);
	nvme_ctrlr_unlock(ctrlr);

	return 0;
}
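/*
 * Note (added commentary): the *_async register helpers below fall back to
 * the transport's synchronous accessor when the async callback is not
 * implemented, then queue a synthetic successful completion through
 * nvme_queue_register_operation_completion() so the caller's spdk_nvme_reg_cb
 * still fires later from the controller's completion processing.
 */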
int
nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_set_reg_4_async == NULL) {
		rc = transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_set_reg_4_async(ctrlr, offset, value, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_set_reg_8_async == NULL) {
		rc = transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_set_reg_8_async(ctrlr, offset, value, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	uint32_t value;
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_reg_4_async == NULL) {
		rc = transport->ops.ctrlr_get_reg_4(ctrlr, offset, &value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_get_reg_4_async(ctrlr, offset, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	uint64_t value;
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_reg_8_async == NULL) {
		rc = transport->ops.ctrlr_get_reg_8(ctrlr, offset, &value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_get_reg_8_async(ctrlr, offset, cb_fn, cb_arg);
}

uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_xfer_size(ctrlr);
}

uint16_t
nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_sges(ctrlr);
}

int
nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_reserve_cmb != NULL) {
		return transport->ops.ctrlr_reserve_cmb(ctrlr);
	}

	return -ENOTSUP;
}

void *
nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_map_cmb != NULL) {
		return transport->ops.ctrlr_map_cmb(ctrlr, size);
	}

	return NULL;
}

int
nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_unmap_cmb != NULL) {
		return transport->ops.ctrlr_unmap_cmb(ctrlr);
	}

	return 0;
}

int
nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_enable_pmr != NULL) {
		return transport->ops.ctrlr_enable_pmr(ctrlr);
	}

	return -ENOSYS;
}

int
nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_disable_pmr != NULL) {
		return transport->ops.ctrlr_disable_pmr(ctrlr);
	}

	return -ENOSYS;
}

void *
nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_map_pmr != NULL) {
		return transport->ops.ctrlr_map_pmr(ctrlr, size);
	}

	return NULL;
}

int
nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_unmap_pmr != NULL) {
		return transport->ops.ctrlr_unmap_pmr(ctrlr);
	}

	return -ENOSYS;
}
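/*
 * Illustrative sketch (added commentary, not upstream code): transports that
 * support a controller memory buffer expose it via the reserve/map helpers
 * above; on other transports the reservation simply fails with -ENOTSUP:
 *
 *	size_t cmb_size;
 *	void *cmb;
 *
 *	if (nvme_transport_ctrlr_reserve_cmb(ctrlr) == 0) {
 *		cmb = nvme_transport_ctrlr_map_cmb(ctrlr, &cmb_size);
 *	}
 */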
struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
				     const struct spdk_nvme_io_qpair_opts *opts)
{
	struct spdk_nvme_qpair *qpair;
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	qpair = transport->ops.ctrlr_create_io_qpair(ctrlr, qid, opts);
	if (qpair != NULL && !nvme_qpair_is_admin_queue(qpair)) {
		qpair->transport = transport;
	}

	return qpair;
}

void
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);

	/* Do not rely on qpair->transport. For multi-process cases, a foreign process may delete
	 * the IO qpair, in which case the transport object would be invalid (each process has its
	 * own unique transport objects since they contain function pointers). So we look up the
	 * transport object in the delete_io_qpair case.
	 */
	rc = transport->ops.ctrlr_delete_io_qpair(ctrlr, qpair);
	if (rc != 0) {
		SPDK_ERRLOG("transport %s returned non-zero for ctrlr_delete_io_qpair op\n",
			    transport->ops.name);
		assert(false);
	}
}

static void
nvme_transport_connect_qpair_fail(struct spdk_nvme_qpair *qpair, void *unused)
{
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;

	/* If the qpair was unable to reconnect, restore the original failure reason */
	qpair->transport_failure_reason = qpair->last_transport_failure_reason;
	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
}

int
nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (!nvme_qpair_is_admin_queue(qpair) && qpair->transport == NULL) {
		qpair->transport = transport;
	}

	qpair->last_transport_failure_reason = qpair->transport_failure_reason;
	qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTING);
	rc = transport->ops.ctrlr_connect_qpair(ctrlr, qpair);
	if (rc != 0) {
		goto err;
	}

	if (qpair->poll_group) {
		rc = nvme_poll_group_connect_qpair(qpair);
		if (rc) {
			goto err;
		}
	}

	if (!qpair->async) {
		/* Busy wait until the qpair exits the connecting state */
		while (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
			if (qpair->poll_group && spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
				rc = spdk_nvme_poll_group_process_completions(
					     qpair->poll_group->group, 0,
					     nvme_transport_connect_qpair_fail);
			} else {
				rc = spdk_nvme_qpair_process_completions(qpair, 0);
			}

			if (rc < 0) {
				goto err;
			}
		}
	}

	return 0;
err:
	nvme_transport_connect_qpair_fail(qpair, NULL);
	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING) {
		assert(qpair->async == true);
		/* Let the caller poll the qpair until it is actually disconnected. */
		return 0;
	}

	return rc;
}
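/*
 * Note (added commentary): for synchronous qpairs,
 * nvme_transport_ctrlr_connect_qpair() above busy-polls until the qpair
 * leaves NVME_QPAIR_CONNECTING; for async qpairs it may return 0 while the
 * qpair is still connecting, or still disconnecting after a failed connect,
 * and the caller is expected to keep polling the qpair to make progress.
 */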
void
nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING ||
	    nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED) {
		return;
	}

	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTING);
	assert(transport != NULL);

	if (qpair->poll_group && (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr))) {
		nvme_poll_group_disconnect_qpair(qpair);
	}

	transport->ops.ctrlr_disconnect_qpair(ctrlr, qpair);
}

void
nvme_transport_ctrlr_disconnect_qpair_done(struct spdk_nvme_qpair *qpair)
{
	if (qpair->active_proc == nvme_ctrlr_get_current_process(qpair->ctrlr) ||
	    nvme_qpair_is_admin_queue(qpair)) {
		nvme_qpair_abort_all_queued_reqs(qpair);
	}
	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
}

int
nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
					struct spdk_memory_domain **domains, int array_size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_memory_domains) {
		return transport->ops.ctrlr_get_memory_domains(ctrlr, domains, array_size);
	}

	return 0;
}

void
nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		qpair->transport->ops.qpair_abort_reqs(qpair, qpair->abort_dnr);
	} else {
		transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
		assert(transport != NULL);
		transport->ops.qpair_abort_reqs(qpair, qpair->abort_dnr);
	}
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_reset(qpair);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_reset(qpair);
}

int
nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_submit_request(qpair, req);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_submit_request(qpair, req);
}

int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_process_completions(qpair, max_completions);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_process_completions(qpair, max_completions);
}
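/*
 * Note (added commentary): the qpair helpers above and below take the fast
 * path through the transport pointer cached on the I/O qpair at creation
 * time, and only fall back to the nvme_get_transport() list walk for admin
 * qpairs, which cannot cache the pointer because of multiprocess support
 * (see the comment above nvme_get_transport()).
 */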
int
nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
				      int (*iter_fn)(struct nvme_request *req, void *arg),
				      void *arg)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
}

void
nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(qpair->ctrlr->trid.trstring);

	assert(transport != NULL);
	transport->ops.admin_qpair_abort_aers(qpair);
}

struct spdk_nvme_transport_poll_group *
nvme_transport_poll_group_create(const struct spdk_nvme_transport *transport)
{
	struct spdk_nvme_transport_poll_group *group = NULL;

	group = transport->ops.poll_group_create();
	if (group) {
		group->transport = transport;
		STAILQ_INIT(&group->connected_qpairs);
		STAILQ_INIT(&group->disconnected_qpairs);
		group->num_connected_qpairs = 0;
	}

	return group;
}

struct spdk_nvme_transport_poll_group *
nvme_transport_qpair_get_optimal_poll_group(const struct spdk_nvme_transport *transport,
		struct spdk_nvme_qpair *qpair)
{
	if (transport->ops.qpair_get_optimal_poll_group) {
		return transport->ops.qpair_get_optimal_poll_group(qpair);
	} else {
		return NULL;
	}
}

int
nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
			      struct spdk_nvme_qpair *qpair)
{
	int rc;

	rc = tgroup->transport->ops.poll_group_add(tgroup, qpair);
	if (rc == 0) {
		qpair->poll_group = tgroup;
		assert(nvme_qpair_get_state(qpair) < NVME_QPAIR_CONNECTED);
		qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
		STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
	}

	return rc;
}

int
nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
				 struct spdk_nvme_qpair *qpair)
{
	int rc __attribute__((unused));

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		return -EINVAL;
	} else if (qpair->poll_group_tailq_head != &tgroup->disconnected_qpairs) {
		return -ENOENT;
	}

	rc = tgroup->transport->ops.poll_group_remove(tgroup, qpair);
	assert(rc == 0);

	STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int64_t
nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	return tgroup->transport->ops.poll_group_process_completions(tgroup, completions_per_qpair,
			disconnected_qpair_cb);
}

int
nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
{
	return tgroup->transport->ops.poll_group_destroy(tgroup);
}
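/*
 * Illustrative sketch (added commentary, not upstream code): a qpair joins a
 * poll group on the disconnected list and is moved between the two lists by
 * the connect/disconnect helpers below; a typical setup sequence is:
 *
 *	struct spdk_nvme_transport_poll_group *tgroup;
 *
 *	tgroup = nvme_transport_poll_group_create(transport);
 *	nvme_transport_poll_group_add(tgroup, qpair);
 *	nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
 *	... qpair now sits on tgroup->connected_qpairs ...
 */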
int
nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc __attribute__((unused));

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		rc = tgroup->transport->ops.poll_group_disconnect_qpair(qpair);
		assert(rc == 0);

		qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
		STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
		assert(tgroup->num_connected_qpairs > 0);
		tgroup->num_connected_qpairs--;
		STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);

		return 0;
	}

	return -EINVAL;
}

int
nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc;

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		rc = tgroup->transport->ops.poll_group_connect_qpair(qpair);
		if (rc == 0) {
			qpair->poll_group_tailq_head = &tgroup->connected_qpairs;
			STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
			STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
			tgroup->num_connected_qpairs++;
		}

		return rc == -EINPROGRESS ? 0 : rc;
	}

	return -EINVAL;
}

int
nvme_transport_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
				    struct spdk_nvme_transport_poll_group_stat **stats)
{
	if (tgroup->transport->ops.poll_group_get_stats) {
		return tgroup->transport->ops.poll_group_get_stats(tgroup, stats);
	}

	return -ENOTSUP;
}

void
nvme_transport_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
				     struct spdk_nvme_transport_poll_group_stat *stats)
{
	if (tgroup->transport->ops.poll_group_free_stats) {
		tgroup->transport->ops.poll_group_free_stats(tgroup, stats);
	}
}

spdk_nvme_transport_type_t
nvme_transport_get_trtype(const struct spdk_nvme_transport *transport)
{
	return transport->ops.type;
}
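/*
 * Illustrative sketch (added commentary, not upstream code): the getters and
 * setters below only copy fields that fit within the caller-provided size,
 * so older callers keep working as new fields are appended; a typical
 * override looks like (the 1024 value is hypothetical):
 *
 *	struct spdk_nvme_transport_opts opts;
 *
 *	spdk_nvme_transport_get_opts(&opts, sizeof(opts));
 *	opts.rdma_srq_size = 1024;
 *	spdk_nvme_transport_set_opts(&opts, sizeof(opts));
 */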
void
spdk_nvme_transport_get_opts(struct spdk_nvme_transport_opts *opts, size_t opts_size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL.\n");
		return;
	}

	if (opts_size == 0) {
		SPDK_ERRLOG("opts_size should not be zero.\n");
		return;
	}

	opts->opts_size = opts_size;

#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvme_transport_opts, field) + sizeof(opts->field) <= opts_size) { \
		opts->field = g_spdk_nvme_transport_opts.field; \
	} \

	SET_FIELD(rdma_srq_size);
	SET_FIELD(rdma_max_cq_size);
	SET_FIELD(rdma_cm_event_timeout_ms);

	/* Do not remove this statement; always update it when adding a new field,
	 * and do not forget to add a SET_FIELD statement for the new field. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_transport_opts) == 24, "Incorrect size");

#undef SET_FIELD
}

int
spdk_nvme_transport_set_opts(const struct spdk_nvme_transport_opts *opts, size_t opts_size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL.\n");
		return -EINVAL;
	}

	if (opts_size == 0) {
		SPDK_ERRLOG("opts_size should not be zero.\n");
		return -EINVAL;
	}

#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvme_transport_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
		g_spdk_nvme_transport_opts.field = opts->field; \
	} \

	SET_FIELD(rdma_srq_size);
	SET_FIELD(rdma_max_cq_size);
	SET_FIELD(rdma_cm_event_timeout_ms);

	g_spdk_nvme_transport_opts.opts_size = opts->opts_size;

#undef SET_FIELD

	return 0;
}

volatile struct spdk_nvme_registers *
spdk_nvme_ctrlr_get_registers(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	if (transport == NULL) {
		/* Transport does not exist. */
		return NULL;
	}

	if (transport->ops.ctrlr_get_registers) {
		return transport->ops.ctrlr_get_registers(ctrlr);
	}

	return NULL;
}
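/*
 * Illustrative sketch (added commentary, not upstream code):
 * spdk_nvme_ctrlr_get_registers() only returns a mapping on transports that
 * implement ctrlr_get_registers (e.g. PCIe), so callers must handle NULL:
 *
 *	volatile struct spdk_nvme_registers *regs;
 *
 *	regs = spdk_nvme_ctrlr_get_registers(ctrlr);
 *	if (regs != NULL) {
 *		uint64_t cap = regs->cap.raw;
 *		...
 *	}
 */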