/* SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2021 Intel Corporation. All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

/*
 * NVMe over PCIe common library
 */

#include "spdk/stdinc.h"
#include "spdk/likely.h"
#include "spdk/string.h"
#include "nvme_internal.h"
#include "nvme_pcie_internal.h"
#include "spdk/trace.h"

#include "spdk_internal/trace_defs.h"

__thread struct nvme_pcie_ctrlr *g_thread_mmio_ctrlr = NULL;

static struct spdk_nvme_pcie_stat g_dummy_stat = {};

static void nvme_pcie_fail_request_bad_vtophys(struct spdk_nvme_qpair *qpair,
		struct nvme_tracker *tr);

static inline uint64_t
nvme_pcie_vtophys(struct spdk_nvme_ctrlr *ctrlr, const void *buf, uint64_t *size)
{
	if (spdk_likely(ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE)) {
		return spdk_vtophys(buf, size);
	} else {
		/* vfio-user address translation with IOVA=VA mode */
		return (uint64_t)(uintptr_t)buf;
	}
}

int
nvme_pcie_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
	uint32_t i;

	/* All head/tail values are set to 0. */
	pqpair->last_sq_tail = pqpair->sq_tail = pqpair->sq_head = pqpair->cq_head = 0;

	/*
	 * First time through the completion queue, HW will set the phase
	 * bit on completions to 1. So set this to 1 here, indicating
	 * we're looking for a 1 to know which entries have completed.
	 * We'll toggle the bit each time the completion queue rolls over.
	 */
	pqpair->flags.phase = 1;
	for (i = 0; i < pqpair->num_entries; i++) {
		pqpair->cpl[i].status.p = 0;
	}

	return 0;
}

static void
nvme_qpair_construct_tracker(struct nvme_tracker *tr, uint16_t cid, uint64_t phys_addr)
{
	tr->prp_sgl_bus_addr = phys_addr + offsetof(struct nvme_tracker, u.prp);
	tr->cid = cid;
	tr->req = NULL;
}

static void *
nvme_pcie_ctrlr_alloc_cmb(struct spdk_nvme_ctrlr *ctrlr, uint64_t size, uint64_t alignment,
			  uint64_t *phys_addr)
{
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	uintptr_t addr;

	if (pctrlr->cmb.mem_register_addr != NULL) {
		/* BAR is mapped for data */
		return NULL;
	}

	addr = (uintptr_t)pctrlr->cmb.bar_va + pctrlr->cmb.current_offset;
	addr = (addr + (alignment - 1)) & ~(alignment - 1);

	/* CMB may only consume part of the BAR, calculate accordingly */
	if (addr + size > ((uintptr_t)pctrlr->cmb.bar_va + pctrlr->cmb.size)) {
		SPDK_ERRLOG("Tried to allocate past valid CMB range!\n");
		return NULL;
	}
	*phys_addr = pctrlr->cmb.bar_pa + addr - (uintptr_t)pctrlr->cmb.bar_va;

	pctrlr->cmb.current_offset = (addr + size) - (uintptr_t)pctrlr->cmb.bar_va;

	return (void *)addr;
}

int
nvme_pcie_qpair_construct(struct spdk_nvme_qpair *qpair,
			  const struct spdk_nvme_io_qpair_opts *opts)
{
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
	struct nvme_tracker *tr;
	uint16_t i;
	uint16_t num_trackers;
	size_t page_align = sysconf(_SC_PAGESIZE);
	size_t queue_align, queue_len;
	uint32_t flags = SPDK_MALLOC_DMA;
	uint64_t sq_paddr = 0;
	uint64_t cq_paddr = 0;

	if (opts) {
		pqpair->sq_vaddr = opts->sq.vaddr;
		pqpair->cq_vaddr = opts->cq.vaddr;
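		/* The caller may also supply the bus/physical addresses of the pre-allocated rings. */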
		sq_paddr = opts->sq.paddr;
		cq_paddr = opts->cq.paddr;
	}

	pqpair->retry_count = ctrlr->opts.transport_retry_count;

	/*
	 * Limit the maximum number of completions to return per call to prevent wraparound,
	 * and calculate how many trackers can be submitted at once without overflowing the
	 * completion queue.
	 */
	pqpair->max_completions_cap = pqpair->num_entries / 4;
	pqpair->max_completions_cap = spdk_max(pqpair->max_completions_cap, NVME_MIN_COMPLETIONS);
	pqpair->max_completions_cap = spdk_min(pqpair->max_completions_cap, NVME_MAX_COMPLETIONS);
	num_trackers = pqpair->num_entries - pqpair->max_completions_cap;

	SPDK_INFOLOG(nvme, "max_completions_cap = %" PRIu16 " num_trackers = %" PRIu16 "\n",
		     pqpair->max_completions_cap, num_trackers);

	assert(num_trackers != 0);

	pqpair->sq_in_cmb = false;

	if (nvme_qpair_is_admin_queue(&pqpair->qpair)) {
		flags |= SPDK_MALLOC_SHARE;
	}

	/* cmd and cpl rings must be aligned on page size boundaries. */
	if (ctrlr->opts.use_cmb_sqs) {
		pqpair->cmd = nvme_pcie_ctrlr_alloc_cmb(ctrlr, pqpair->num_entries * sizeof(struct spdk_nvme_cmd),
							page_align, &pqpair->cmd_bus_addr);
		if (pqpair->cmd != NULL) {
			pqpair->sq_in_cmb = true;
		}
	}

	if (pqpair->sq_in_cmb == false) {
		if (pqpair->sq_vaddr) {
			pqpair->cmd = pqpair->sq_vaddr;
		} else {
			/* To ensure physical address contiguity we make each ring occupy
			 * a single hugepage only. See MAX_IO_QUEUE_ENTRIES.
			 */
			queue_len = pqpair->num_entries * sizeof(struct spdk_nvme_cmd);
			queue_align = spdk_max(spdk_align32pow2(queue_len), page_align);
			pqpair->cmd = spdk_zmalloc(queue_len, queue_align, NULL, SPDK_ENV_SOCKET_ID_ANY, flags);
			if (pqpair->cmd == NULL) {
				SPDK_ERRLOG("alloc qpair_cmd failed\n");
				return -ENOMEM;
			}
		}
		if (sq_paddr) {
			assert(pqpair->sq_vaddr != NULL);
			pqpair->cmd_bus_addr = sq_paddr;
		} else {
			pqpair->cmd_bus_addr = nvme_pcie_vtophys(ctrlr, pqpair->cmd, NULL);
			if (pqpair->cmd_bus_addr == SPDK_VTOPHYS_ERROR) {
				SPDK_ERRLOG("spdk_vtophys(pqpair->cmd) failed\n");
				return -EFAULT;
			}
		}
	}

	if (pqpair->cq_vaddr) {
		pqpair->cpl = pqpair->cq_vaddr;
	} else {
		queue_len = pqpair->num_entries * sizeof(struct spdk_nvme_cpl);
		queue_align = spdk_max(spdk_align32pow2(queue_len), page_align);
		pqpair->cpl = spdk_zmalloc(queue_len, queue_align, NULL, SPDK_ENV_SOCKET_ID_ANY, flags);
		if (pqpair->cpl == NULL) {
			SPDK_ERRLOG("alloc qpair_cpl failed\n");
			return -ENOMEM;
		}
	}
	if (cq_paddr) {
		assert(pqpair->cq_vaddr != NULL);
		pqpair->cpl_bus_addr = cq_paddr;
	} else {
		pqpair->cpl_bus_addr = nvme_pcie_vtophys(ctrlr, pqpair->cpl, NULL);
		if (pqpair->cpl_bus_addr == SPDK_VTOPHYS_ERROR) {
			SPDK_ERRLOG("spdk_vtophys(pqpair->cpl) failed\n");
			return -EFAULT;
		}
	}

	pqpair->sq_tdbl = pctrlr->doorbell_base + (2 * qpair->id + 0) * pctrlr->doorbell_stride_u32;
	pqpair->cq_hdbl = pctrlr->doorbell_base + (2 * qpair->id + 1) * pctrlr->doorbell_stride_u32;

	/*
	 * Reserve space for all of the trackers in a single allocation.
	 * struct nvme_tracker must be padded so that its size is already a power of 2.
	 * This ensures the PRP list embedded in the nvme_tracker object will not span a
	 * 4KB boundary, while allowing access to trackers in tr[] via normal array indexing.
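	 * The allocation below is aligned to sizeof(*tr) itself, so each tracker begins at a
	 * multiple of its own power-of-2 size and array indexing preserves that alignment.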
	 */
	pqpair->tr = spdk_zmalloc(num_trackers * sizeof(*tr), sizeof(*tr), NULL,
				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
	if (pqpair->tr == NULL) {
		SPDK_ERRLOG("nvme_tr failed\n");
		return -ENOMEM;
	}

	TAILQ_INIT(&pqpair->free_tr);
	TAILQ_INIT(&pqpair->outstanding_tr);

	for (i = 0; i < num_trackers; i++) {
		tr = &pqpair->tr[i];
		nvme_qpair_construct_tracker(tr, i, nvme_pcie_vtophys(ctrlr, tr, NULL));
		TAILQ_INSERT_HEAD(&pqpair->free_tr, tr, tq_list);
	}

	nvme_pcie_qpair_reset(qpair);

	return 0;
}

int
nvme_pcie_ctrlr_construct_admin_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t num_entries)
{
	struct nvme_pcie_qpair *pqpair;
	int rc;

	pqpair = spdk_zmalloc(sizeof(*pqpair), 64, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
	if (pqpair == NULL) {
		return -ENOMEM;
	}

	pqpair->num_entries = num_entries;
	pqpair->flags.delay_cmd_submit = 0;
	pqpair->pcie_state = NVME_PCIE_QPAIR_READY;

	ctrlr->adminq = &pqpair->qpair;

	rc = nvme_qpair_init(ctrlr->adminq,
			     0, /* qpair ID */
			     ctrlr,
			     SPDK_NVME_QPRIO_URGENT,
			     num_entries,
			     false);
	if (rc != 0) {
		return rc;
	}

	pqpair->stat = spdk_zmalloc(sizeof(*pqpair->stat), 64, NULL, SPDK_ENV_SOCKET_ID_ANY,
				    SPDK_MALLOC_SHARE);
	if (!pqpair->stat) {
		SPDK_ERRLOG("Failed to allocate admin qpair statistics\n");
		return -ENOMEM;
	}

	return nvme_pcie_qpair_construct(ctrlr->adminq, NULL);
}

/**
 * Note: the ctrlr_lock must be held when calling this function.
 */
void
nvme_pcie_qpair_insert_pending_admin_request(struct spdk_nvme_qpair *qpair,
		struct nvme_request *req, struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
	struct nvme_request *active_req = req;
	struct spdk_nvme_ctrlr_process *active_proc;

	/*
	 * The admin request is from another process. Move to the per
	 * process list for that process to handle it later.
	 */
	assert(nvme_qpair_is_admin_queue(qpair));
	assert(active_req->pid != getpid());

	active_proc = nvme_ctrlr_get_process(ctrlr, active_req->pid);
	if (active_proc) {
		/* Save the original completion information */
		memcpy(&active_req->cpl, cpl, sizeof(*cpl));
		STAILQ_INSERT_TAIL(&active_proc->active_reqs, active_req, stailq);
	} else {
		SPDK_ERRLOG("The owning process (pid %d) is not found. Dropping the request.\n",
			    active_req->pid);
		if (active_req->user_buffer && active_req->payload_size) {
			spdk_free(active_req->payload.contig_or_cb_arg);
		}
		nvme_free_request(active_req);
	}
}

/**
 * Note: the ctrlr_lock must be held when calling this function.
 */
void
nvme_pcie_qpair_complete_pending_admin_request(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
	struct nvme_request *req, *tmp_req;
	pid_t pid = getpid();
	struct spdk_nvme_ctrlr_process *proc;

	/*
	 * Check whether there is any pending admin request from
	 * other active processes.
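	 * Such requests were queued by nvme_pcie_qpair_insert_pending_admin_request()
	 * and are completed here in the context of the owning process.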
	 */
	assert(nvme_qpair_is_admin_queue(qpair));

	proc = nvme_ctrlr_get_current_process(ctrlr);
	if (!proc) {
		SPDK_ERRLOG("the active process (pid %d) is not found for this controller.\n", pid);
		assert(proc);
		return;
	}

	STAILQ_FOREACH_SAFE(req, &proc->active_reqs, stailq, tmp_req) {
		STAILQ_REMOVE(&proc->active_reqs, req, nvme_request, stailq);

		assert(req->pid == pid);

		nvme_complete_request(req->cb_fn, req->cb_arg, qpair, req, &req->cpl);
		nvme_free_request(req);
	}
}

int
nvme_pcie_ctrlr_cmd_create_io_cq(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *io_que, spdk_nvme_cmd_cb cb_fn,
				 void *cb_arg)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(io_que);
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_CREATE_IO_CQ;

	cmd->cdw10_bits.create_io_q.qid = io_que->id;
	cmd->cdw10_bits.create_io_q.qsize = pqpair->num_entries - 1;

	cmd->cdw11_bits.create_io_cq.pc = 1;
	cmd->dptr.prp.prp1 = pqpair->cpl_bus_addr;

	return nvme_ctrlr_submit_admin_request(ctrlr, req);
}

int
nvme_pcie_ctrlr_cmd_create_io_sq(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *io_que, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(io_que);
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_CREATE_IO_SQ;

	cmd->cdw10_bits.create_io_q.qid = io_que->id;
	cmd->cdw10_bits.create_io_q.qsize = pqpair->num_entries - 1;
	cmd->cdw11_bits.create_io_sq.pc = 1;
	cmd->cdw11_bits.create_io_sq.qprio = io_que->qprio;
	cmd->cdw11_bits.create_io_sq.cqid = io_que->id;
	cmd->dptr.prp.prp1 = pqpair->cmd_bus_addr;

	return nvme_ctrlr_submit_admin_request(ctrlr, req);
}

int
nvme_pcie_ctrlr_cmd_delete_io_cq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_DELETE_IO_CQ;
	cmd->cdw10_bits.delete_io_q.qid = qpair->id;

	return nvme_ctrlr_submit_admin_request(ctrlr, req);
}

int
nvme_pcie_ctrlr_cmd_delete_io_sq(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_DELETE_IO_SQ;
	cmd->cdw10_bits.delete_io_q.qid = qpair->id;

	return nvme_ctrlr_submit_admin_request(ctrlr, req);
}

static void
nvme_completion_sq_error_delete_cq_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvme_qpair *qpair = arg;
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);

	if (spdk_nvme_cpl_is_error(cpl)) {
		SPDK_ERRLOG("delete_io_cq failed!\n");
	}

	pqpair->pcie_state = NVME_PCIE_QPAIR_FAILED;
}

static void
nvme_completion_create_sq_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvme_qpair *qpair = arg;
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
	struct nvme_pcie_ctrlr *pctrlr = nvme_pcie_ctrlr(ctrlr);
	int rc;

	if (pqpair->flags.defer_destruction) {
		/* This qpair was deleted by the application while the
		 * connection was still in progress. We had to wait
		 * to free the qpair resources until this outstanding
		 * command was completed. Now that we have the completion,
		 * free it.
		 */
		nvme_pcie_qpair_destroy(qpair);
		return;
	}

	if (spdk_nvme_cpl_is_error(cpl)) {
		SPDK_ERRLOG("nvme_create_io_sq failed, deleting cq!\n");
		rc = nvme_pcie_ctrlr_cmd_delete_io_cq(qpair->ctrlr, qpair, nvme_completion_sq_error_delete_cq_cb,
						      qpair);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to send request to delete_io_cq with rc=%d\n", rc);
			pqpair->pcie_state = NVME_PCIE_QPAIR_FAILED;
		}
		return;
	}
	pqpair->pcie_state = NVME_PCIE_QPAIR_READY;
	if (ctrlr->shadow_doorbell) {
		pqpair->shadow_doorbell.sq_tdbl = ctrlr->shadow_doorbell + (2 * qpair->id + 0) *
						  pctrlr->doorbell_stride_u32;
		pqpair->shadow_doorbell.cq_hdbl = ctrlr->shadow_doorbell + (2 * qpair->id + 1) *
						  pctrlr->doorbell_stride_u32;
		pqpair->shadow_doorbell.sq_eventidx = ctrlr->eventidx + (2 * qpair->id + 0) *
						      pctrlr->doorbell_stride_u32;
		pqpair->shadow_doorbell.cq_eventidx = ctrlr->eventidx + (2 * qpair->id + 1) *
						      pctrlr->doorbell_stride_u32;
		pqpair->flags.has_shadow_doorbell = 1;
	} else {
		pqpair->flags.has_shadow_doorbell = 0;
	}
	nvme_pcie_qpair_reset(qpair);
}

static void
nvme_completion_create_cq_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct spdk_nvme_qpair *qpair = arg;
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
	int rc;

	if (pqpair->flags.defer_destruction) {
		/* This qpair was deleted by the application while the
		 * connection was still in progress. We had to wait
		 * to free the qpair resources until this outstanding
		 * command was completed. Now that we have the completion,
		 * free it.
		 */
		nvme_pcie_qpair_destroy(qpair);
		return;
	}

	if (spdk_nvme_cpl_is_error(cpl)) {
		pqpair->pcie_state = NVME_PCIE_QPAIR_FAILED;
		SPDK_ERRLOG("nvme_create_io_cq failed!\n");
		return;
	}

	rc = nvme_pcie_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair, nvme_completion_create_sq_cb, qpair);

	if (rc != 0) {
		SPDK_ERRLOG("Failed to send request to create_io_sq, deleting cq!\n");
		rc = nvme_pcie_ctrlr_cmd_delete_io_cq(qpair->ctrlr, qpair, nvme_completion_sq_error_delete_cq_cb,
						      qpair);
		if (rc != 0) {
			SPDK_ERRLOG("Failed to send request to delete_io_cq with rc=%d\n", rc);
			pqpair->pcie_state = NVME_PCIE_QPAIR_FAILED;
		}
		return;
	}
	pqpair->pcie_state = NVME_PCIE_QPAIR_WAIT_FOR_SQ;
}

static int
_nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
				 uint16_t qid)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
	int rc;

	/* Statistics may already be allocated in the case of controller reset */
	if (qpair->poll_group) {
		struct nvme_pcie_poll_group *group = SPDK_CONTAINEROF(qpair->poll_group,
						     struct nvme_pcie_poll_group, group);

		pqpair->stat = &group->stats;
		pqpair->shared_stats = true;
	} else {
		if (pqpair->stat == NULL) {
			pqpair->stat = calloc(1, sizeof(*pqpair->stat));
			if (!pqpair->stat) {
				SPDK_ERRLOG("Failed to allocate qpair statistics\n");
				nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
				return -ENOMEM;
			}
		}
	}

	rc = nvme_pcie_ctrlr_cmd_create_io_cq(ctrlr, qpair, nvme_completion_create_cq_cb, qpair);

	if (rc != 0) {
		SPDK_ERRLOG("Failed to send request to create_io_cq\n");
		nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
		return rc;
	}
	pqpair->pcie_state = NVME_PCIE_QPAIR_WAIT_FOR_CQ;
	return 0;
}

int
nvme_pcie_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	int rc = 0;

	if (!nvme_qpair_is_admin_queue(qpair)) {
		rc = _nvme_pcie_ctrlr_create_io_qpair(ctrlr, qpair, qpair->id);
	} else {
		nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
	}

	return rc;
}

void
nvme_pcie_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	if (!nvme_qpair_is_admin_queue(qpair) || !ctrlr->is_disconnecting) {
		nvme_transport_ctrlr_disconnect_qpair_done(qpair);
	} else {
		/* If this function is called for the admin qpair via spdk_nvme_ctrlr_reset()
		 * or spdk_nvme_ctrlr_disconnect(), initiate a Controller Level Reset.
		 * Then we can abort trackers safely because the Controller Level Reset deletes
		 * all I/O SQ/CQs.
		 */
		nvme_ctrlr_disable(ctrlr);
	}
}

/* Used when dst points to MMIO (i.e. CMB) in a virtual machine - in these cases we must
 * not use wide instructions because QEMU will not emulate such instructions to MMIO space.
 * So this function ensures we only copy 8 bytes at a time.
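 * The 64-byte NVMe command is therefore written as eight separate uint64_t stores.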
 */
static inline void
nvme_pcie_copy_command_mmio(struct spdk_nvme_cmd *dst, const struct spdk_nvme_cmd *src)
{
	uint64_t *dst64 = (uint64_t *)dst;
	const uint64_t *src64 = (const uint64_t *)src;
	uint32_t i;

	for (i = 0; i < sizeof(*dst) / 8; i++) {
		dst64[i] = src64[i];
	}
}

static inline void
nvme_pcie_copy_command(struct spdk_nvme_cmd *dst, const struct spdk_nvme_cmd *src)
{
	/* dst and src are known to be non-overlapping and 64-byte aligned. */
#if defined(__SSE2__)
	__m128i *d128 = (__m128i *)dst;
	const __m128i *s128 = (const __m128i *)src;

	_mm_stream_si128(&d128[0], _mm_load_si128(&s128[0]));
	_mm_stream_si128(&d128[1], _mm_load_si128(&s128[1]));
	_mm_stream_si128(&d128[2], _mm_load_si128(&s128[2]));
	_mm_stream_si128(&d128[3], _mm_load_si128(&s128[3]));
#else
	*dst = *src;
#endif
}

void
nvme_pcie_qpair_submit_tracker(struct spdk_nvme_qpair *qpair, struct nvme_tracker *tr)
{
	struct nvme_request *req;
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;

	req = tr->req;
	assert(req != NULL);

	spdk_trace_record(TRACE_NVME_PCIE_SUBMIT, qpair->id, 0, (uintptr_t)req, req->cb_arg,
			  (uint32_t)req->cmd.cid, (uint32_t)req->cmd.opc,
			  req->cmd.cdw10, req->cmd.cdw11, req->cmd.cdw12);

	if (req->cmd.fuse) {
		/*
		 * Keep track of the fuse operation sequence so that we ring the doorbell only
		 * after the second fuse is submitted.
		 */
		qpair->last_fuse = req->cmd.fuse;
	}

	/* Don't use wide instructions to copy the NVMe command; the QEMU virtual NVMe
	 * controller limits the maximum access width to 8 bytes at a time.
	 */
	if (spdk_unlikely((ctrlr->quirks & NVME_QUIRK_MAXIMUM_PCI_ACCESS_WIDTH) && pqpair->sq_in_cmb)) {
		nvme_pcie_copy_command_mmio(&pqpair->cmd[pqpair->sq_tail], &req->cmd);
	} else {
		/* Copy the command from the tracker to the submission queue.
		 */
		nvme_pcie_copy_command(&pqpair->cmd[pqpair->sq_tail], &req->cmd);
	}

	if (spdk_unlikely(++pqpair->sq_tail == pqpair->num_entries)) {
		pqpair->sq_tail = 0;
	}

	if (spdk_unlikely(pqpair->sq_tail == pqpair->sq_head)) {
		SPDK_ERRLOG("sq_tail is passing sq_head!\n");
	}

	if (!pqpair->flags.delay_cmd_submit) {
		nvme_pcie_qpair_ring_sq_doorbell(qpair);
	}
}

void
nvme_pcie_qpair_complete_tracker(struct spdk_nvme_qpair *qpair, struct nvme_tracker *tr,
				 struct spdk_nvme_cpl *cpl, bool print_on_error)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
	struct nvme_request *req;
	bool retry, error;
	bool req_from_current_proc = true;
	bool print_error;

	req = tr->req;

	spdk_trace_record(TRACE_NVME_PCIE_COMPLETE, qpair->id, 0, (uintptr_t)req, req->cb_arg,
			  (uint32_t)req->cmd.cid, (uint32_t)cpl->status_raw);

	assert(req != NULL);

	error = spdk_nvme_cpl_is_error(cpl);
	retry = error && nvme_completion_is_retry(cpl) &&
		req->retries < pqpair->retry_count;
	print_error = error && print_on_error && !qpair->ctrlr->opts.disable_error_logging;

	if (print_error) {
		spdk_nvme_qpair_print_command(qpair, &req->cmd);
	}

	if (print_error || SPDK_DEBUGLOG_FLAG_ENABLED("nvme")) {
		spdk_nvme_qpair_print_completion(qpair, cpl);
	}

	assert(cpl->cid == req->cmd.cid);

	if (retry) {
		req->retries++;
		nvme_pcie_qpair_submit_tracker(qpair, tr);
	} else {
		TAILQ_REMOVE(&pqpair->outstanding_tr, tr, tq_list);

		/* Only check admin requests from different processes. */
		if (nvme_qpair_is_admin_queue(qpair) && req->pid != getpid()) {
			req_from_current_proc = false;
			nvme_pcie_qpair_insert_pending_admin_request(qpair, req, cpl);
		} else {
			nvme_complete_request(tr->cb_fn, tr->cb_arg, qpair, req, cpl);
		}

		if (req_from_current_proc == true) {
			nvme_qpair_free_request(qpair, req);
		}

		tr->req = NULL;

		TAILQ_INSERT_HEAD(&pqpair->free_tr, tr, tq_list);
	}
}

void
nvme_pcie_qpair_manual_complete_tracker(struct spdk_nvme_qpair *qpair,
					struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr,
					bool print_on_error)
{
	struct spdk_nvme_cpl cpl;

	memset(&cpl, 0, sizeof(cpl));
	cpl.sqid = qpair->id;
	cpl.cid = tr->cid;
	cpl.status.sct = sct;
	cpl.status.sc = sc;
	cpl.status.dnr = dnr;
	nvme_pcie_qpair_complete_tracker(qpair, tr, &cpl, print_on_error);
}

void
nvme_pcie_qpair_abort_trackers(struct spdk_nvme_qpair *qpair, uint32_t dnr)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
	struct nvme_tracker *tr, *temp, *last;

	last = TAILQ_LAST(&pqpair->outstanding_tr, nvme_outstanding_tr_head);

	/* Abort previously submitted (outstanding) trs */
	TAILQ_FOREACH_SAFE(tr, &pqpair->outstanding_tr, tq_list, temp) {
		if (!qpair->ctrlr->opts.disable_error_logging) {
			SPDK_ERRLOG("aborting outstanding command\n");
		}
		nvme_pcie_qpair_manual_complete_tracker(qpair, tr, SPDK_NVME_SCT_GENERIC,
							SPDK_NVME_SC_ABORTED_BY_REQUEST, dnr, true);

		if (tr == last) {
			break;
		}
	}
}

void
nvme_pcie_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
	struct nvme_tracker *tr;

	tr = TAILQ_FIRST(&pqpair->outstanding_tr);
	while (tr != NULL) {
		assert(tr->req != NULL);
		if (tr->req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST) {
			nvme_pcie_qpair_manual_complete_tracker(qpair, tr,
								SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_ABORTED_SQ_DELETION, 0,
								false);
			tr = TAILQ_FIRST(&pqpair->outstanding_tr);
		} else {
			tr = TAILQ_NEXT(tr, tq_list);
		}
	}
}

void
nvme_pcie_admin_qpair_destroy(struct spdk_nvme_qpair *qpair)
{
	nvme_pcie_admin_qpair_abort_aers(qpair);
}

void
nvme_pcie_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
{
	nvme_pcie_qpair_abort_trackers(qpair, dnr);
}

static void
nvme_pcie_qpair_check_timeout(struct spdk_nvme_qpair *qpair)
{
	uint64_t t02;
	struct nvme_tracker *tr, *tmp;
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
	struct spdk_nvme_ctrlr_process *active_proc;

	/* Don't check timeouts during controller initialization. */
	if (ctrlr->state != NVME_CTRLR_STATE_READY) {
		return;
	}

	if (nvme_qpair_is_admin_queue(qpair)) {
		active_proc = nvme_ctrlr_get_current_process(ctrlr);
	} else {
		active_proc = qpair->active_proc;
	}

	/* Only check timeouts if the current process has a timeout callback. */
	if (active_proc == NULL || active_proc->timeout_cb_fn == NULL) {
		return;
	}

	t02 = spdk_get_ticks();
	TAILQ_FOREACH_SAFE(tr, &pqpair->outstanding_tr, tq_list, tmp) {
		assert(tr->req != NULL);

		if (nvme_request_check_timeout(tr->req, tr->cid, active_proc, t02)) {
			/*
			 * The requests are in order, so as soon as one has not timed out,
			 * stop iterating.
			 */
			break;
		}
	}
}

int32_t
nvme_pcie_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
	struct nvme_tracker *tr;
	struct spdk_nvme_cpl *cpl, *next_cpl;
	uint32_t num_completions = 0;
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
	uint16_t next_cq_head;
	uint8_t next_phase;
	bool next_is_valid = false;
	int rc;

	if (spdk_unlikely(pqpair->pcie_state == NVME_PCIE_QPAIR_FAILED)) {
		return -ENXIO;
	}

	if (spdk_unlikely(nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING)) {
		if (pqpair->pcie_state == NVME_PCIE_QPAIR_READY) {
			/* It is possible that another thread set the pcie_state to
			 * QPAIR_READY, if it polled the adminq and processed the SQ
			 * completion for this qpair. So check for that condition
			 * here and then update the qpair's state to CONNECTED, since
			 * we can only set the qpair state from the qpair's thread.
			 * (Note: this fixed issue #2157.)
			 */
			nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
		} else if (pqpair->pcie_state == NVME_PCIE_QPAIR_FAILED) {
			nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
			return -ENXIO;
		} else {
			rc = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
			if (rc < 0) {
				return rc;
			} else if (pqpair->pcie_state == NVME_PCIE_QPAIR_FAILED) {
				nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
				return -ENXIO;
			}
		}
		return 0;
	}

	if (spdk_unlikely(nvme_qpair_is_admin_queue(qpair))) {
		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	}

	if (max_completions == 0 || max_completions > pqpair->max_completions_cap) {
		/*
		 * max_completions == 0 means unlimited, but complete at most one
		 * max_completions_cap batch of I/O at a time so that the completion
		 * queue doorbells don't wrap around.
		 */
		max_completions = pqpair->max_completions_cap;
	}

	pqpair->stat->polls++;

	while (1) {
		cpl = &pqpair->cpl[pqpair->cq_head];

		if (!next_is_valid && cpl->status.p != pqpair->flags.phase) {
			break;
		}

		if (spdk_likely(pqpair->cq_head + 1 != pqpair->num_entries)) {
			next_cq_head = pqpair->cq_head + 1;
			next_phase = pqpair->flags.phase;
		} else {
			next_cq_head = 0;
			next_phase = !pqpair->flags.phase;
		}
		next_cpl = &pqpair->cpl[next_cq_head];
		next_is_valid = (next_cpl->status.p == next_phase);
		if (next_is_valid) {
			__builtin_prefetch(&pqpair->tr[next_cpl->cid]);
		}

#if defined(__PPC64__) || defined(__riscv) || defined(__loongarch__)
		/*
		 * This memory barrier prevents reordering of:
		 * - load after store from/to tr
		 * - load after load cpl phase and cpl cid
		 */
		spdk_mb();
#elif defined(__aarch64__)
		__asm volatile("dmb oshld" ::: "memory");
#endif

		if (spdk_unlikely(++pqpair->cq_head == pqpair->num_entries)) {
			pqpair->cq_head = 0;
			pqpair->flags.phase = !pqpair->flags.phase;
		}

		tr = &pqpair->tr[cpl->cid];
		pqpair->sq_head = cpl->sqhd;

		if (tr->req) {
			/* Prefetch the req's STAILQ_ENTRY since we'll need to access it
			 * as part of putting the req back on the qpair's free list.
			 */
			__builtin_prefetch(&tr->req->stailq);
			nvme_pcie_qpair_complete_tracker(qpair, tr, cpl, true);
		} else {
			SPDK_ERRLOG("cpl does not map to outstanding cmd\n");
			spdk_nvme_qpair_print_completion(qpair, cpl);
			assert(0);
		}

		if (++num_completions == max_completions) {
			break;
		}
	}

	if (num_completions > 0) {
		pqpair->stat->completions += num_completions;
		nvme_pcie_qpair_ring_cq_doorbell(qpair);
	} else {
		pqpair->stat->idle_polls++;
	}

	if (pqpair->flags.delay_cmd_submit) {
		if (pqpair->last_sq_tail != pqpair->sq_tail) {
			nvme_pcie_qpair_ring_sq_doorbell(qpair);
			pqpair->last_sq_tail = pqpair->sq_tail;
		}
	}

	if (spdk_unlikely(ctrlr->timeout_enabled)) {
		/*
		 * User registered for timeout callback
		 */
		nvme_pcie_qpair_check_timeout(qpair);
	}

	/* Before returning, complete any pending admin request or
	 * process the admin qpair disconnection.
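	 * Both are handled under the ctrlr_lock, which is still held here for admin qpairs.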
	 */
	if (spdk_unlikely(nvme_qpair_is_admin_queue(qpair))) {
		nvme_pcie_qpair_complete_pending_admin_request(qpair);

		if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING) {
			rc = nvme_ctrlr_disable_poll(qpair->ctrlr);
			if (rc != -EAGAIN) {
				nvme_transport_ctrlr_disconnect_qpair_done(qpair);
			}
		}

		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	}

	if (spdk_unlikely(pqpair->flags.has_pending_vtophys_failures)) {
		struct nvme_tracker *tr, *tmp;

		TAILQ_FOREACH_SAFE(tr, &pqpair->outstanding_tr, tq_list, tmp) {
			if (tr->bad_vtophys) {
				tr->bad_vtophys = 0;
				nvme_pcie_fail_request_bad_vtophys(qpair, tr);
			}
		}
		pqpair->flags.has_pending_vtophys_failures = 0;
	}

	return num_completions;
}

int
nvme_pcie_qpair_destroy(struct spdk_nvme_qpair *qpair)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);

	if (nvme_qpair_is_admin_queue(qpair)) {
		nvme_pcie_admin_qpair_destroy(qpair);
	}
	/*
	 * We check sq_vaddr and cq_vaddr to see if the user specified the memory
	 * buffers when creating the I/O queue.
	 * If the user specified them, we cannot free that memory.
	 * Nor do we free it if it's in the CMB.
	 */
	if (!pqpair->sq_vaddr && pqpair->cmd && !pqpair->sq_in_cmb) {
		spdk_free(pqpair->cmd);
	}
	if (!pqpair->cq_vaddr && pqpair->cpl) {
		spdk_free(pqpair->cpl);
	}
	if (pqpair->tr) {
		spdk_free(pqpair->tr);
	}

	nvme_qpair_deinit(qpair);

	if (!pqpair->shared_stats && (qpair->active_proc == nvme_ctrlr_get_current_process(qpair->ctrlr))) {
		if (qpair->id) {
			free(pqpair->stat);
		} else {
			/* The admin qpair's statistics are allocated from huge pages because
			 * the admin qpair is shared across processes.
			 */
			spdk_free(pqpair->stat);
		}
	}

	spdk_free(pqpair);

	return 0;
}

struct spdk_nvme_qpair *
nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
				const struct spdk_nvme_io_qpair_opts *opts)
{
	struct nvme_pcie_qpair *pqpair;
	struct spdk_nvme_qpair *qpair;
	int rc;

	assert(ctrlr != NULL);

	pqpair = spdk_zmalloc(sizeof(*pqpair), 64, NULL,
			      SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
	if (pqpair == NULL) {
		return NULL;
	}

	pqpair->num_entries = opts->io_queue_size;
	pqpair->flags.delay_cmd_submit = opts->delay_cmd_submit;

	qpair = &pqpair->qpair;

	rc = nvme_qpair_init(qpair, qid, ctrlr, opts->qprio, opts->io_queue_requests, opts->async_mode);
	if (rc != 0) {
		nvme_pcie_qpair_destroy(qpair);
		return NULL;
	}

	rc = nvme_pcie_qpair_construct(qpair, opts);

	if (rc != 0) {
		nvme_pcie_qpair_destroy(qpair);
		return NULL;
	}

	return qpair;
}

int
nvme_pcie_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
	struct nvme_completion_poll_status *status;
	int rc;

	assert(ctrlr != NULL);

	if (ctrlr->is_removed) {
		goto free;
	}

	if (ctrlr->prepare_for_reset) {
		if (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
			pqpair->flags.defer_destruction = true;
		}
		goto clear_shadow_doorbells;
	}

	/* If attempting to delete a qpair that's still being connected, we have to wait until it's
	 * finished, so that we don't free it while it's waiting for the create cq/sq callbacks.
	 */
	while (pqpair->pcie_state == NVME_PCIE_QPAIR_WAIT_FOR_CQ ||
	       pqpair->pcie_state == NVME_PCIE_QPAIR_WAIT_FOR_SQ) {
		rc = spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		if (rc < 0) {
			break;
		}
	}

	status = calloc(1, sizeof(*status));
	if (!status) {
		SPDK_ERRLOG("Failed to allocate status tracker\n");
		goto free;
	}

	/* Delete the I/O submission queue */
	rc = nvme_pcie_ctrlr_cmd_delete_io_sq(ctrlr, qpair, nvme_completion_poll_cb, status);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to send request to delete_io_sq with rc=%d\n", rc);
		free(status);
		goto free;
	}
	if (nvme_wait_for_completion(ctrlr->adminq, status)) {
		if (!status->timed_out) {
			free(status);
		}
		goto free;
	}

	/* Now that the submission queue is deleted, the device is supposed to have
	 * completed any outstanding I/O. Try to complete them. If they don't complete,
	 * they'll be marked as aborted and completed below. */
	if (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr)) {
		nvme_pcie_qpair_process_completions(qpair, 0);
	}

	memset(status, 0, sizeof(*status));
	/* Delete the completion queue */
	rc = nvme_pcie_ctrlr_cmd_delete_io_cq(ctrlr, qpair, nvme_completion_poll_cb, status);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to send request to delete_io_cq with rc=%d\n", rc);
		free(status);
		goto free;
	}
	if (nvme_wait_for_completion(ctrlr->adminq, status)) {
		if (!status->timed_out) {
			free(status);
		}
		goto free;
	}
	free(status);

clear_shadow_doorbells:
	if (pqpair->flags.has_shadow_doorbell) {
		*pqpair->shadow_doorbell.sq_tdbl = 0;
		*pqpair->shadow_doorbell.cq_hdbl = 0;
		*pqpair->shadow_doorbell.sq_eventidx = 0;
		*pqpair->shadow_doorbell.cq_eventidx = 0;
	}
free:
	if (qpair->no_deletion_notification_needed == 0) {
		/* Abort the rest of the I/O */
		nvme_pcie_qpair_abort_trackers(qpair, 1);
	}

	if (!pqpair->flags.defer_destruction) {
		nvme_pcie_qpair_destroy(qpair);
	}
	return 0;
}

static void
nvme_pcie_fail_request_bad_vtophys(struct spdk_nvme_qpair *qpair, struct nvme_tracker *tr)
{
	if (!qpair->in_completion_context) {
		struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);

		tr->bad_vtophys = 1;
		pqpair->flags.has_pending_vtophys_failures = 1;
		return;
	}

	/*
	 * Bad vtophys translation, so abort this request and return
	 * immediately.
	 */
	SPDK_ERRLOG("vtophys or other payload buffer related error\n");
	nvme_pcie_qpair_manual_complete_tracker(qpair, tr, SPDK_NVME_SCT_GENERIC,
						SPDK_NVME_SC_INVALID_FIELD,
						1 /* do not retry */, true);
}

/*
 * Append PRP list entries to describe a virtually contiguous buffer starting at virt_addr of len bytes.
 *
 * *prp_index will be updated to account for the number of PRP entries used.
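 *
 * For example, with a 4 KiB page size, a page-aligned 16 KiB buffer consumes four entries:
 * the first page address goes into prp1, the remaining three into tr->u.prp[], and
 * prp2 points at that list.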
 */
static inline int
nvme_pcie_prp_list_append(struct spdk_nvme_ctrlr *ctrlr, struct nvme_tracker *tr,
			  uint32_t *prp_index, void *virt_addr, size_t len,
			  uint32_t page_size)
{
	struct spdk_nvme_cmd *cmd = &tr->req->cmd;
	uintptr_t page_mask = page_size - 1;
	uint64_t phys_addr;
	uint32_t i;

	SPDK_DEBUGLOG(nvme, "prp_index:%u virt_addr:%p len:%u\n",
		      *prp_index, virt_addr, (uint32_t)len);

	if (spdk_unlikely(((uintptr_t)virt_addr & 3) != 0)) {
		SPDK_ERRLOG("virt_addr %p not dword aligned\n", virt_addr);
		return -EFAULT;
	}

	i = *prp_index;
	while (len) {
		uint32_t seg_len;

		/*
		 * prp_index 0 is stored in prp1, and the rest are stored in the prp[] array,
		 * so prp_index == count is valid.
		 */
		if (spdk_unlikely(i > SPDK_COUNTOF(tr->u.prp))) {
			SPDK_ERRLOG("out of PRP entries\n");
			return -EFAULT;
		}

		phys_addr = nvme_pcie_vtophys(ctrlr, virt_addr, NULL);
		if (spdk_unlikely(phys_addr == SPDK_VTOPHYS_ERROR)) {
			SPDK_ERRLOG("vtophys(%p) failed\n", virt_addr);
			return -EFAULT;
		}

		if (i == 0) {
			SPDK_DEBUGLOG(nvme, "prp1 = %p\n", (void *)phys_addr);
			cmd->dptr.prp.prp1 = phys_addr;
			seg_len = page_size - ((uintptr_t)virt_addr & page_mask);
		} else {
			if ((phys_addr & page_mask) != 0) {
				SPDK_ERRLOG("PRP %u not page aligned (%p)\n", i, virt_addr);
				return -EFAULT;
			}

			SPDK_DEBUGLOG(nvme, "prp[%u] = %p\n", i - 1, (void *)phys_addr);
			tr->u.prp[i - 1] = phys_addr;
			seg_len = page_size;
		}

		seg_len = spdk_min(seg_len, len);
		virt_addr = (uint8_t *)virt_addr + seg_len;
		len -= seg_len;
		i++;
	}

	cmd->psdt = SPDK_NVME_PSDT_PRP;
	if (i <= 1) {
		cmd->dptr.prp.prp2 = 0;
	} else if (i == 2) {
		cmd->dptr.prp.prp2 = tr->u.prp[0];
		SPDK_DEBUGLOG(nvme, "prp2 = %p\n", (void *)cmd->dptr.prp.prp2);
	} else {
		cmd->dptr.prp.prp2 = tr->prp_sgl_bus_addr;
		SPDK_DEBUGLOG(nvme, "prp2 = %p (PRP list)\n", (void *)cmd->dptr.prp.prp2);
	}

	*prp_index = i;
	return 0;
}

static int
nvme_pcie_qpair_build_request_invalid(struct spdk_nvme_qpair *qpair,
				      struct nvme_request *req, struct nvme_tracker *tr, bool dword_aligned)
{
	assert(0);
	nvme_pcie_fail_request_bad_vtophys(qpair, tr);
	return -EINVAL;
}

/**
 * Build PRP list describing physically contiguous payload buffer.
 */
static int
nvme_pcie_qpair_build_contig_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req,
				     struct nvme_tracker *tr, bool dword_aligned)
{
	uint32_t prp_index = 0;
	int rc;

	rc = nvme_pcie_prp_list_append(qpair->ctrlr, tr, &prp_index,
				       (uint8_t *)req->payload.contig_or_cb_arg + req->payload_offset,
				       req->payload_size, qpair->ctrlr->page_size);
	if (rc) {
		nvme_pcie_fail_request_bad_vtophys(qpair, tr);
	}

	return rc;
}

/**
 * Build an SGL describing a physically contiguous payload buffer.
 *
 * This is more efficient than using PRP because large buffers can be
 * described this way.
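 * A single data block descriptor can cover a multi-page physically contiguous
 * region, whereas a PRP list needs one entry per memory page.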
 */
static int
nvme_pcie_qpair_build_contig_hw_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req,
		struct nvme_tracker *tr, bool dword_aligned)
{
	uint8_t *virt_addr;
	uint64_t phys_addr, mapping_length;
	uint32_t length;
	struct spdk_nvme_sgl_descriptor *sgl;
	uint32_t nseg = 0;

	assert(req->payload_size != 0);
	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);

	sgl = tr->u.sgl;
	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	req->cmd.dptr.sgl1.unkeyed.subtype = 0;

	length = req->payload_size;
	/* ubsan complains about applying zero offset to null pointer if contig_or_cb_arg is NULL,
	 * so just double cast it to make it go away */
	virt_addr = (uint8_t *)((uintptr_t)req->payload.contig_or_cb_arg + req->payload_offset);

	while (length > 0) {
		if (nseg >= NVME_MAX_SGL_DESCRIPTORS) {
			nvme_pcie_fail_request_bad_vtophys(qpair, tr);
			return -EFAULT;
		}

		if (dword_aligned && ((uintptr_t)virt_addr & 3)) {
			SPDK_ERRLOG("virt_addr %p not dword aligned\n", virt_addr);
			nvme_pcie_fail_request_bad_vtophys(qpair, tr);
			return -EFAULT;
		}

		mapping_length = length;
		phys_addr = nvme_pcie_vtophys(qpair->ctrlr, virt_addr, &mapping_length);
		if (phys_addr == SPDK_VTOPHYS_ERROR) {
			nvme_pcie_fail_request_bad_vtophys(qpair, tr);
			return -EFAULT;
		}

		mapping_length = spdk_min(length, mapping_length);

		length -= mapping_length;
		virt_addr += mapping_length;

		sgl->unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
		sgl->unkeyed.length = mapping_length;
		sgl->address = phys_addr;
		sgl->unkeyed.subtype = 0;

		sgl++;
		nseg++;
	}

	if (nseg == 1) {
		/*
		 * The whole transfer can be described by a single SGL descriptor.
		 * Use the special case described by the spec where SGL1's type is Data Block.
		 * This means the SGL in the tracker is not used at all, so copy the first (and only)
		 * SGL element into SGL1.
		 */
		req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
		req->cmd.dptr.sgl1.address = tr->u.sgl[0].address;
		req->cmd.dptr.sgl1.unkeyed.length = tr->u.sgl[0].unkeyed.length;
	} else {
		/* The SPDK NVMe driver supports only 1 SGL segment for now; this is enough because
		 * NVME_MAX_SGL_DESCRIPTORS * 16 is less than one page.
		 */
		req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
		req->cmd.dptr.sgl1.address = tr->prp_sgl_bus_addr;
		req->cmd.dptr.sgl1.unkeyed.length = nseg * sizeof(struct spdk_nvme_sgl_descriptor);
	}

	return 0;
}

/**
 * Build SGL list describing scattered payload buffer.
 */
static int
nvme_pcie_qpair_build_hw_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req,
				     struct nvme_tracker *tr, bool dword_aligned)
{
	int rc;
	void *virt_addr;
	uint64_t phys_addr, mapping_length;
	uint32_t remaining_transfer_len, remaining_user_sge_len, length;
	struct spdk_nvme_sgl_descriptor *sgl;
	uint32_t nseg = 0;

	/*
	 * Build scattered payloads.
	 */
	assert(req->payload_size != 0);
	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL);
	assert(req->payload.reset_sgl_fn != NULL);
	assert(req->payload.next_sge_fn != NULL);
	req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);

	sgl = tr->u.sgl;
	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	req->cmd.dptr.sgl1.unkeyed.subtype = 0;

	remaining_transfer_len = req->payload_size;

	while (remaining_transfer_len > 0) {
		rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg,
					      &virt_addr, &remaining_user_sge_len);
		if (rc) {
			nvme_pcie_fail_request_bad_vtophys(qpair, tr);
			return -EFAULT;
		}

		/* Bit Bucket SGL descriptor */
		if ((uint64_t)virt_addr == UINT64_MAX) {
			/* TODO: enable WRITE and COMPARE when necessary */
			if (req->cmd.opc != SPDK_NVME_OPC_READ) {
				SPDK_ERRLOG("Only READ command can be supported\n");
				goto exit;
			}
			if (nseg >= NVME_MAX_SGL_DESCRIPTORS) {
				SPDK_ERRLOG("Too many SGL entries\n");
				goto exit;
			}

			sgl->unkeyed.type = SPDK_NVME_SGL_TYPE_BIT_BUCKET;
			/* If the SGL describes a destination data buffer, the length of the data
			 * buffer shall be discarded by the controller, and the length is included
			 * in the Number of Logical Blocks (NLB) parameter. Otherwise, the length
			 * is not included in the NLB parameter.
			 */
			remaining_user_sge_len = spdk_min(remaining_user_sge_len, remaining_transfer_len);
			remaining_transfer_len -= remaining_user_sge_len;

			sgl->unkeyed.length = remaining_user_sge_len;
			sgl->address = 0;
			sgl->unkeyed.subtype = 0;

			sgl++;
			nseg++;

			continue;
		}

		remaining_user_sge_len = spdk_min(remaining_user_sge_len, remaining_transfer_len);
		remaining_transfer_len -= remaining_user_sge_len;
		while (remaining_user_sge_len > 0) {
			if (nseg >= NVME_MAX_SGL_DESCRIPTORS) {
				SPDK_ERRLOG("Too many SGL entries\n");
				goto exit;
			}

			if (dword_aligned && ((uintptr_t)virt_addr & 3)) {
				SPDK_ERRLOG("virt_addr %p not dword aligned\n", virt_addr);
				goto exit;
			}

			mapping_length = remaining_user_sge_len;
			phys_addr = nvme_pcie_vtophys(qpair->ctrlr, virt_addr, &mapping_length);
			if (phys_addr == SPDK_VTOPHYS_ERROR) {
				goto exit;
			}

			length = spdk_min(remaining_user_sge_len, mapping_length);
			remaining_user_sge_len -= length;
			virt_addr = (uint8_t *)virt_addr + length;

			if (nseg > 0 && phys_addr ==
			    (*(sgl - 1)).address + (*(sgl - 1)).unkeyed.length) {
				/* extend previous entry */
				(*(sgl - 1)).unkeyed.length += length;
				continue;
			}

			sgl->unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
			sgl->unkeyed.length = length;
			sgl->address = phys_addr;
			sgl->unkeyed.subtype = 0;

			sgl++;
			nseg++;
		}
	}

	if (nseg == 1) {
		/*
		 * The whole transfer can be described by a single SGL descriptor.
		 * Use the special case described by the spec where SGL1's type is Data Block.
		 * This means the SGL in the tracker is not used at all, so copy the first (and only)
		 * SGL element into SGL1.
		 */
		req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
		req->cmd.dptr.sgl1.address = tr->u.sgl[0].address;
		req->cmd.dptr.sgl1.unkeyed.length = tr->u.sgl[0].unkeyed.length;
	} else {
		/* The SPDK NVMe driver supports only 1 SGL segment for now; this is enough because
		 * NVME_MAX_SGL_DESCRIPTORS * 16 is less than one page.
		 */
		req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
		req->cmd.dptr.sgl1.address = tr->prp_sgl_bus_addr;
		req->cmd.dptr.sgl1.unkeyed.length = nseg * sizeof(struct spdk_nvme_sgl_descriptor);
	}

	return 0;

exit:
	nvme_pcie_fail_request_bad_vtophys(qpair, tr);
	return -EFAULT;
}

/**
 * Build PRP list describing scattered payload buffer.
 */
static int
nvme_pcie_qpair_build_prps_sgl_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req,
				       struct nvme_tracker *tr, bool dword_aligned)
{
	int rc;
	void *virt_addr;
	uint32_t remaining_transfer_len, length;
	uint32_t prp_index = 0;
	uint32_t page_size = qpair->ctrlr->page_size;

	/*
	 * Build scattered payloads.
	 */
	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL);
	assert(req->payload.reset_sgl_fn != NULL);
	req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);

	remaining_transfer_len = req->payload_size;
	while (remaining_transfer_len > 0) {
		assert(req->payload.next_sge_fn != NULL);
		rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &length);
		if (rc) {
			nvme_pcie_fail_request_bad_vtophys(qpair, tr);
			return -EFAULT;
		}

		length = spdk_min(remaining_transfer_len, length);

		/*
		 * Any incompatible sges should have been handled up in the splitting routine,
		 * but assert here as an additional check.
		 *
		 * All SGEs except last must end on a page boundary.
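		 * (And all SGEs except the first must start on one; nvme_pcie_prp_list_append()
		 * rejects any PRP entry after the first that is not page aligned.)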
		 */
		assert((length == remaining_transfer_len) ||
		       _is_page_aligned((uintptr_t)virt_addr + length, page_size));

		rc = nvme_pcie_prp_list_append(qpair->ctrlr, tr, &prp_index, virt_addr, length, page_size);
		if (rc) {
			nvme_pcie_fail_request_bad_vtophys(qpair, tr);
			return rc;
		}

		remaining_transfer_len -= length;
	}

	return 0;
}

typedef int(*build_req_fn)(struct spdk_nvme_qpair *, struct nvme_request *, struct nvme_tracker *,
			   bool);

static build_req_fn const g_nvme_pcie_build_req_table[][2] = {
	[NVME_PAYLOAD_TYPE_INVALID] = {
		nvme_pcie_qpair_build_request_invalid,		/* PRP */
		nvme_pcie_qpair_build_request_invalid		/* SGL */
	},
	[NVME_PAYLOAD_TYPE_CONTIG] = {
		nvme_pcie_qpair_build_contig_request,		/* PRP */
		nvme_pcie_qpair_build_contig_hw_sgl_request	/* SGL */
	},
	[NVME_PAYLOAD_TYPE_SGL] = {
		nvme_pcie_qpair_build_prps_sgl_request,		/* PRP */
		nvme_pcie_qpair_build_hw_sgl_request		/* SGL */
	}
};

static int
nvme_pcie_qpair_build_metadata(struct spdk_nvme_qpair *qpair, struct nvme_tracker *tr,
			       bool sgl_supported, bool mptr_sgl_supported, bool dword_aligned)
{
	void *md_payload;
	struct nvme_request *req = tr->req;
	uint64_t mapping_length;

	if (req->payload.md) {
		md_payload = (uint8_t *)req->payload.md + req->md_offset;
		if (dword_aligned && ((uintptr_t)md_payload & 3)) {
			SPDK_ERRLOG("virt_addr %p not dword aligned\n", md_payload);
			goto exit;
		}

		mapping_length = req->md_size;
		if (sgl_supported && mptr_sgl_supported && dword_aligned) {
			assert(req->cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
			req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_SGL;

			tr->meta_sgl.address = nvme_pcie_vtophys(qpair->ctrlr, md_payload, &mapping_length);
			if (tr->meta_sgl.address == SPDK_VTOPHYS_ERROR || mapping_length != req->md_size) {
				goto exit;
			}
			tr->meta_sgl.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
			tr->meta_sgl.unkeyed.length = req->md_size;
			tr->meta_sgl.unkeyed.subtype = 0;
			req->cmd.mptr = tr->prp_sgl_bus_addr - sizeof(struct spdk_nvme_sgl_descriptor);
		} else {
			req->cmd.mptr = nvme_pcie_vtophys(qpair->ctrlr, md_payload, &mapping_length);
			if (req->cmd.mptr == SPDK_VTOPHYS_ERROR || mapping_length != req->md_size) {
				goto exit;
			}
		}
	}

	return 0;

exit:
	nvme_pcie_fail_request_bad_vtophys(qpair, tr);
	return -EINVAL;
}

int
nvme_pcie_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	struct nvme_tracker *tr;
	int rc = 0;
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);
	enum nvme_payload_type payload_type;
	bool sgl_supported;
	bool mptr_sgl_supported;
	bool dword_aligned = true;

	if (spdk_unlikely(nvme_qpair_is_admin_queue(qpair))) {
		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	}

	tr = TAILQ_FIRST(&pqpair->free_tr);

	if (tr == NULL) {
		pqpair->stat->queued_requests++;
		/* Inform the upper layer to try again later.
		 */
		rc = -EAGAIN;
		goto exit;
	}

	pqpair->stat->submitted_requests++;
	TAILQ_REMOVE(&pqpair->free_tr, tr, tq_list); /* remove tr from free_tr */
	TAILQ_INSERT_TAIL(&pqpair->outstanding_tr, tr, tq_list);
	tr->req = req;
	tr->cb_fn = req->cb_fn;
	tr->cb_arg = req->cb_arg;
	req->cmd.cid = tr->cid;

	if (req->payload_size != 0) {
		payload_type = nvme_payload_type(&req->payload);
		/* According to the specification, PRPs shall be used for all
		 * Admin commands for NVMe over PCIe implementations.
		 */
		sgl_supported = (ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) != 0 &&
				!nvme_qpair_is_admin_queue(qpair);
		mptr_sgl_supported = (ctrlr->flags & SPDK_NVME_CTRLR_MPTR_SGL_SUPPORTED) != 0 &&
				     !nvme_qpair_is_admin_queue(qpair);

		if (sgl_supported) {
			/* Don't use SGL for DSM command */
			if (spdk_unlikely((ctrlr->quirks & NVME_QUIRK_NO_SGL_FOR_DSM) &&
					  (req->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT))) {
				sgl_supported = false;
			}
		}

		if (sgl_supported && !(ctrlr->flags & SPDK_NVME_CTRLR_SGL_REQUIRES_DWORD_ALIGNMENT)) {
			dword_aligned = false;
		}

		/* If we fail to build the request or the metadata, do not return the -EFAULT back up
		 * the stack. This ensures that we always fail these types of requests via a
		 * completion callback, and never in the context of the submission.
		 */
		rc = g_nvme_pcie_build_req_table[payload_type][sgl_supported](qpair, req, tr, dword_aligned);
		if (rc < 0) {
			assert(rc == -EFAULT);
			rc = 0;
			goto exit;
		}

		rc = nvme_pcie_qpair_build_metadata(qpair, tr, sgl_supported, mptr_sgl_supported, dword_aligned);
		if (rc < 0) {
			assert(rc == -EFAULT);
			rc = 0;
			goto exit;
		}
	}

	nvme_pcie_qpair_submit_tracker(qpair, tr);

exit:
	if (spdk_unlikely(nvme_qpair_is_admin_queue(qpair))) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	}

	return rc;
}

struct spdk_nvme_transport_poll_group *
nvme_pcie_poll_group_create(void)
{
	struct nvme_pcie_poll_group *group = calloc(1, sizeof(*group));

	if (group == NULL) {
		SPDK_ERRLOG("Unable to allocate poll group.\n");
		return NULL;
	}

	return &group->group;
}

int
nvme_pcie_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_pcie_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_pcie_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
			 struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_pcie_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
			    struct spdk_nvme_qpair *qpair)
{
	struct nvme_pcie_qpair *pqpair = nvme_pcie_qpair(qpair);

	pqpair->stat = &g_dummy_stat;
	return 0;
}

int64_t
nvme_pcie_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int32_t local_completions = 0;
	int64_t total_completions = 0;

	STAILQ_FOREACH_SAFE(qpair, &tgroup->disconnected_qpairs, poll_group_stailq, tmp_qpair) {
		disconnected_qpair_cb(qpair, tgroup->group->ctx);
	}

	STAILQ_FOREACH_SAFE(qpair, &tgroup->connected_qpairs, poll_group_stailq, tmp_qpair) {
		local_completions = spdk_nvme_qpair_process_completions(qpair, completions_per_qpair);
		if (spdk_unlikely(local_completions < 0)) {
			disconnected_qpair_cb(qpair, tgroup->group->ctx);
			total_completions = -ENXIO;
		} else if (spdk_likely(total_completions >= 0)) {
			total_completions += local_completions;
		}
	}

	return total_completions;
}

int
nvme_pcie_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
{
	if (!STAILQ_EMPTY(&tgroup->connected_qpairs) || !STAILQ_EMPTY(&tgroup->disconnected_qpairs)) {
		return -EBUSY;
	}

	free(tgroup);

	return 0;
}

int
nvme_pcie_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
			       struct spdk_nvme_transport_poll_group_stat **_stats)
{
	struct nvme_pcie_poll_group *group;
	struct spdk_nvme_transport_poll_group_stat *stats;

	if (tgroup == NULL || _stats == NULL) {
		SPDK_ERRLOG("Invalid stats or group pointer\n");
		return -EINVAL;
	}

	stats = calloc(1, sizeof(*stats));
	if (!stats) {
		SPDK_ERRLOG("Can't allocate memory for stats\n");
		return -ENOMEM;
	}
	stats->trtype = SPDK_NVME_TRANSPORT_PCIE;
	group = SPDK_CONTAINEROF(tgroup, struct nvme_pcie_poll_group, group);
	memcpy(&stats->pcie, &group->stats, sizeof(group->stats));

	*_stats = stats;

	return 0;
}

void
nvme_pcie_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
				struct spdk_nvme_transport_poll_group_stat *stats)
{
	free(stats);
}

SPDK_TRACE_REGISTER_FN(nvme_pcie, "nvme_pcie", TRACE_GROUP_NVME_PCIE)
{
	struct spdk_trace_tpoint_opts opts[] = {
		{
			"NVME_PCIE_SUBMIT", TRACE_NVME_PCIE_SUBMIT,
			OWNER_NVME_PCIE_QP, OBJECT_NVME_PCIE_REQ, 1,
			{	{ "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 },
				{ "cid", SPDK_TRACE_ARG_TYPE_INT, 4 },
				{ "opc", SPDK_TRACE_ARG_TYPE_INT, 4 },
				{ "dw10", SPDK_TRACE_ARG_TYPE_PTR, 4 },
				{ "dw11", SPDK_TRACE_ARG_TYPE_PTR, 4 },
				{ "dw12", SPDK_TRACE_ARG_TYPE_PTR, 4 }
			}
		},
		{
			"NVME_PCIE_COMPLETE", TRACE_NVME_PCIE_COMPLETE,
			OWNER_NVME_PCIE_QP, OBJECT_NVME_PCIE_REQ, 0,
			{	{ "ctx", SPDK_TRACE_ARG_TYPE_PTR, 8 },
				{ "cid", SPDK_TRACE_ARG_TYPE_INT, 4 },
				{ "cpl", SPDK_TRACE_ARG_TYPE_PTR, 4 }
			}
		},
	};

	spdk_trace_register_object(OBJECT_NVME_PCIE_REQ, 'p');
	spdk_trace_register_owner(OWNER_NVME_PCIE_QP, 'q');
	spdk_trace_register_description_ext(opts, SPDK_COUNTOF(opts));
}