/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2015 Intel Corporation. All rights reserved.
 * Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 */

#include "nvme_internal.h"
#include "spdk/nvme.h"

int
spdk_nvme_ctrlr_io_cmd_raw_no_payload_build(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cmd *cmd,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_payload payload;

	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
		return -EINVAL;
	}

	memset(&payload, 0, sizeof(payload));
	req = nvme_allocate_request(qpair, &payload, 0, 0, cb_fn, cb_arg);

	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
			   struct spdk_nvme_qpair *qpair,
			   struct spdk_nvme_cmd *cmd,
			   void *buf, uint32_t len,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = nvme_allocate_request_contig(qpair, buf, len, cb_fn, cb_arg);

	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ctrlr_cmd_io_raw_with_md(struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_nvme_qpair *qpair,
				   struct spdk_nvme_cmd *cmd,
				   void *buf, uint32_t len, void *md_buf,
				   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	uint32_t md_len = 0;

	payload = NVME_PAYLOAD_CONTIG(buf, md_buf);

	/* Calculate metadata length */
	if (md_buf) {
		struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, cmd->nsid);

		assert(ns != NULL);
		assert(ns->sector_size != 0);
		md_len = len / ns->sector_size * ns->md_size;
	}

	req = nvme_allocate_request(qpair, &payload, len, md_len, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ctrlr_cmd_iov_raw_with_md(struct spdk_nvme_ctrlr *ctrlr,
				    struct spdk_nvme_qpair *qpair,
				    struct spdk_nvme_cmd *cmd,
				    uint32_t len, void *md_buf,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
				    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				    spdk_nvme_req_next_sge_cb next_sge_fn)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	uint32_t md_len = 0;

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, md_buf);

	/* Calculate metadata length */
	if (md_buf) {
		struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, cmd->nsid);

		assert(ns != NULL);
		assert(ns->sector_size != 0);
		md_len = len / ns->sector_size * ns->md_size;
	}

	req = nvme_allocate_request(qpair, &payload, len, md_len, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}
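/*
 * Usage sketch (not part of this file): issuing a raw 4 KiB read through
 * spdk_nvme_ctrlr_cmd_io_raw(). The raw_read_done callback, the buffer
 * handling, and the 512-byte LBA size are illustrative assumptions; the
 * buffer must be DMA-safe memory such as spdk_zmalloc() provides.
 *
 *	static void
 *	raw_read_done(void *arg, const struct spdk_nvme_cpl *cpl)
 *	{
 *		if (spdk_nvme_cpl_is_error(cpl)) {
 *			SPDK_ERRLOG("raw read failed\n");
 *		}
 *	}
 *
 *	struct spdk_nvme_cmd cmd = {};
 *	void *buf = spdk_zmalloc(4096, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
 *				 SPDK_MALLOC_DMA);
 *
 *	cmd.opc = SPDK_NVME_OPC_READ;
 *	cmd.nsid = 1;
 *	cmd.cdw12 = (4096 / 512) - 1;	(NLB field, 0-based)
 *	rc = spdk_nvme_ctrlr_cmd_io_raw(ctrlr, qpair, &cmd, buf, 4096,
 *					raw_read_done, NULL);
 */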
int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd,
			      void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	int rc;

	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_contig(ctrlr->adminq, buf, len, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_ctrlr_unlock(ctrlr);
	return rc;
}

int
nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
			uint8_t csi, void *payload, size_t payload_size,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size,
					      cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_IDENTIFY;
	cmd->cdw10_bits.identify.cns = cns;
	cmd->cdw10_bits.identify.cntid = cntid;
	cmd->cdw11_bits.identify.csi = csi;
	cmd->nsid = nsid;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_ctrlr_unlock(ctrlr);
	return rc;
}
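/*
 * Usage sketch (assumed caller context): fetching the Identify Controller
 * data page via the public spdk_nvme_ctrlr_cmd_admin_raw() wrapper.
 * identify_done is an illustrative callback name.
 *
 *	struct spdk_nvme_ctrlr_data *cdata;
 *	struct spdk_nvme_cmd cmd = {};
 *
 *	cdata = spdk_zmalloc(sizeof(*cdata), 0x1000, NULL,
 *			     SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
 *	cmd.opc = SPDK_NVME_OPC_IDENTIFY;
 *	cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_CTRLR;
 *	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, cdata, sizeof(*cdata),
 *					   identify_done, NULL);
 */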
int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
	cmd->nsid = nsid;
	cmd->cdw10_bits.ns_attach.sel = SPDK_NVME_NS_CTRLR_ATTACH;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_ctrlr_unlock(ctrlr);
	return rc;
}

int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
	cmd->nsid = nsid;
	cmd->cdw10_bits.ns_attach.sel = SPDK_NVME_NS_CTRLR_DETACH;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_ctrlr_unlock(ctrlr);
	return rc;
}

int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ns_data),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
	cmd->cdw10_bits.ns_manage.sel = SPDK_NVME_NS_MANAGEMENT_CREATE;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_ctrlr_unlock(ctrlr);
	return rc;
}

int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
			 void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
	cmd->cdw10_bits.ns_manage.sel = SPDK_NVME_NS_MANAGEMENT_DELETE;
	cmd->nsid = nsid;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_ctrlr_unlock(ctrlr);
	return rc;
}

int
nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG;
	cmd->dptr.prp.prp1 = prp1;
	cmd->dptr.prp.prp2 = prp2;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_ctrlr_unlock(ctrlr);
	return rc;
}

int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FORMAT_NVM;
	cmd->nsid = nsid;
	memcpy(&cmd->cdw10, format, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      true);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd->cdw10_bits.set_features.fid = feature;
	cmd->cdw11 = cdw11;
	cmd->cdw12 = cdw12;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      false);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd->cdw10_bits.get_features.fid = feature;
	cmd->cdw11 = cdw11;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}
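/*
 * Usage sketch: reading the current arbitration settings. Because
 * spdk_nvme_ctrlr_cmd_get_feature() carries no data buffer here, the
 * feature value is returned in cpl->cdw0 of the completion passed to the
 * (illustrative) features_done callback.
 *
 *	rc = spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_ARBITRATION,
 *					     0, NULL, 0, features_done, NULL);
 */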
int
spdk_nvme_ctrlr_cmd_get_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				   uint32_t cdw11, void *payload,
				   uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
				   void *cb_arg, uint32_t ns_id)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      false);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd->cdw10_bits.get_features.fid = feature;
	cmd->cdw11 = cdw11;
	cmd->nsid = ns_id;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_set_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				   uint32_t cdw11, uint32_t cdw12, void *payload,
				   uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
				   void *cb_arg, uint32_t ns_id)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      true);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd->cdw10_bits.set_features.fid = feature;
	cmd->cdw11 = cdw11;
	cmd->cdw12 = cdw12;
	cmd->nsid = ns_id;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}

int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	union spdk_nvme_feat_number_of_queues feat_num_queues;

	feat_num_queues.raw = 0;
	feat_num_queues.bits.nsqr = num_queues - 1;
	feat_num_queues.bits.ncqr = num_queues - 1;

	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES,
					       feat_num_queues.raw, 0,
					       NULL, 0, cb_fn, cb_arg);
}

int
nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, 0, NULL, 0,
					       cb_fn, cb_arg);
}

int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
				      union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
				      void *cb_arg)
{
	uint32_t cdw11;

	cdw11 = config.raw;
	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, 0,
					       NULL, 0,
					       cb_fn, cb_arg);
}

int
nvme_ctrlr_cmd_set_host_id(struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	union spdk_nvme_feat_host_identifier feat_host_identifier;

	feat_host_identifier.raw = 0;
	if (host_id_size == 16) {
		/* 128-bit extended host identifier */
		feat_host_identifier.bits.exhid = 1;
	} else if (host_id_size == 8) {
		/* 64-bit host identifier */
		feat_host_identifier.bits.exhid = 0;
	} else {
		SPDK_ERRLOG("Invalid host ID size %u\n", host_id_size);
		return -EINVAL;
	}

	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_HOST_IDENTIFIER,
					       feat_host_identifier.raw, 0,
					       host_id, host_id_size, cb_fn, cb_arg);
}
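/*
 * Usage sketch: enabling deallocated-or-unwritten-block error reporting
 * (DULBE) for one namespace with spdk_nvme_ctrlr_cmd_set_feature_ns().
 * features_done and nsid are illustrative; the union layout is assumed to
 * match spdk/nvme_spec.h.
 *
 *	union spdk_nvme_feat_error_recovery er = { .raw = 0 };
 *
 *	er.bits.dulbe = 1;
 *	rc = spdk_nvme_ctrlr_cmd_set_feature_ns(ctrlr,
 *			SPDK_NVME_FEAT_ERROR_RECOVERY, er.raw, 0, NULL, 0,
 *			features_done, NULL, nsid);
 */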
int
spdk_nvme_ctrlr_cmd_get_log_page_ext(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				     uint32_t nsid, void *payload, uint32_t payload_size,
				     uint64_t offset, uint32_t cdw10,
				     uint32_t cdw11, uint32_t cdw14,
				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	uint32_t numd, numdl, numdu;
	uint32_t lpol, lpou;
	int rc;

	if (payload_size == 0) {
		return -EINVAL;
	}

	if (offset & 3) {
		return -EINVAL;
	}

	numd = spdk_nvme_bytes_to_numd(payload_size);
	numdl = numd & 0xFFFFu;
	numdu = (numd >> 16) & 0xFFFFu;

	lpol = (uint32_t)offset;
	lpou = (uint32_t)(offset >> 32);

	nvme_ctrlr_lock(ctrlr);

	if (offset && !ctrlr->cdata.lpa.edlp) {
		nvme_ctrlr_unlock(ctrlr);
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size, cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd->nsid = nsid;
	cmd->cdw10 = cdw10;
	cmd->cdw10_bits.get_log_page.numdl = numdl;
	cmd->cdw10_bits.get_log_page.lid = log_page;

	cmd->cdw11 = cdw11;
	cmd->cdw11_bits.get_log_page.numdu = numdu;
	cmd->cdw12 = lpol;
	cmd->cdw13 = lpou;
	cmd->cdw14 = cdw14;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				 uint32_t nsid, void *payload, uint32_t payload_size,
				 uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return spdk_nvme_ctrlr_cmd_get_log_page_ext(ctrlr, log_page, nsid, payload,
			payload_size, offset, 0, 0, 0, cb_fn, cb_arg);
}
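/*
 * Usage sketch: fetching the SMART / health log page for all namespaces.
 * Because these admin helpers go through nvme_allocate_request_user_copy(),
 * the health buffer may be ordinary heap memory. log_done is an
 * illustrative callback name.
 *
 *	struct spdk_nvme_health_information_page *health = calloc(1, sizeof(*health));
 *
 *	rc = spdk_nvme_ctrlr_cmd_get_log_page(ctrlr,
 *			SPDK_NVME_LOG_HEALTH_INFORMATION, SPDK_NVME_GLOBAL_NS_TAG,
 *			health, sizeof(*health), 0, log_done, NULL);
 */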
static void
nvme_ctrlr_retry_queued_abort(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_request *next, *tmp;
	int rc;

	if (ctrlr->is_resetting || ctrlr->is_destructed || ctrlr->is_failed) {
		/* Don't resubmit aborts if ctrlr is failing */
		return;
	}

	if (spdk_nvme_ctrlr_get_admin_qp_failure_reason(ctrlr) != SPDK_NVME_QPAIR_FAILURE_NONE) {
		/* Don't resubmit aborts if the admin qpair has failed */
		return;
	}

	STAILQ_FOREACH_SAFE(next, &ctrlr->queued_aborts, stailq, tmp) {
		STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
		ctrlr->outstanding_aborts++;
		rc = nvme_ctrlr_submit_admin_request(ctrlr, next);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to submit queued abort.\n");
			memset(&next->cpl, 0, sizeof(next->cpl));
			next->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
			next->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			next->cpl.status.dnr = 1;
			nvme_complete_request(next->cb_fn, next->cb_arg, next->qpair, next, &next->cpl);
		} else {
			/* If the first abort succeeds, stop iterating. */
			break;
		}
	}
}

static int
_nvme_ctrlr_submit_abort_request(struct spdk_nvme_ctrlr *ctrlr,
				 struct nvme_request *req)
{
	/* ACL is a 0-based value. */
	if (ctrlr->outstanding_aborts >= ctrlr->cdata.acl + 1U) {
		STAILQ_INSERT_TAIL(&ctrlr->queued_aborts, req, stailq);
		return 0;
	} else {
		ctrlr->outstanding_aborts++;
		return nvme_ctrlr_submit_admin_request(ctrlr, req);
	}
}

static void
nvme_ctrlr_cmd_abort_cpl(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_request *req = ctx;
	struct spdk_nvme_ctrlr *ctrlr;

	ctrlr = req->qpair->ctrlr;

	assert(ctrlr->outstanding_aborts > 0);
	ctrlr->outstanding_aborts--;
	nvme_ctrlr_retry_queued_abort(ctrlr);

	req->user_cb_fn(req->user_cb_arg, cpl);
}

int
spdk_nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			  uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	int rc;
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (qpair == NULL) {
		qpair = ctrlr->adminq;
	}

	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_cmd_abort_cpl, NULL);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}
	req->cb_arg = req;
	req->user_cb_fn = cb_fn;
	req->user_cb_arg = cb_arg;

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_ABORT;
	cmd->cdw10_bits.abort.sqid = qpair->id;
	cmd->cdw10_bits.abort.cid = cid;

	rc = _nvme_ctrlr_submit_abort_request(ctrlr, req);

	nvme_ctrlr_unlock(ctrlr);
	return rc;
}

static void
nvme_complete_abort_request(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_request *req = ctx;
	struct nvme_request *parent = req->parent;
	struct spdk_nvme_ctrlr *ctrlr;

	ctrlr = req->qpair->ctrlr;

	assert(ctrlr->outstanding_aborts > 0);
	ctrlr->outstanding_aborts--;
	nvme_ctrlr_retry_queued_abort(ctrlr);

	nvme_request_remove_child(parent, req);

	if (!spdk_nvme_cpl_is_abort_success(cpl)) {
		parent->parent_status.cdw0 |= 1U;
	}

	if (parent->num_children == 0) {
		nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
				      parent, &parent->parent_status);
	}
}
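/*
 * spdk_nvme_ctrlr_cmd_abort_ext() below fans a single logical abort out into
 * a parent/child request tree: nvme_request_add_abort() allocates one child
 * ABORT command per matching outstanding request, and
 * nvme_complete_abort_request() above completes the parent only once the
 * last child has finished, setting bit 0 of cdw0 in the parent status if any
 * child's abort did not succeed.
 *
 * Usage sketch (illustrative names): abort every request on io_qpair that
 * was submitted with my_io_ctx as its callback argument.
 *
 *	rc = spdk_nvme_ctrlr_cmd_abort_ext(ctrlr, io_qpair, my_io_ctx,
 *					   abort_done, NULL);
 *	if (rc == -ENOENT) {
 *		... no outstanding or queued request matched my_io_ctx ...
 *	}
 */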
static int
nvme_request_add_abort(struct nvme_request *req, void *arg)
{
	struct nvme_request *parent = arg;
	struct nvme_request *child;
	void *cmd_cb_arg;

	cmd_cb_arg = parent->user_cb_arg;

	if (!nvme_request_abort_match(req, cmd_cb_arg)) {
		return 0;
	}

	child = nvme_allocate_request_null(parent->qpair->ctrlr->adminq,
					   nvme_complete_abort_request, NULL);
	if (child == NULL) {
		return -ENOMEM;
	}

	child->cb_arg = child;

	child->cmd.opc = SPDK_NVME_OPC_ABORT;
	/* Copy SQID from the parent. */
	child->cmd.cdw10_bits.abort.sqid = parent->cmd.cdw10_bits.abort.sqid;
	child->cmd.cdw10_bits.abort.cid = req->cmd.cid;

	child->parent = parent;

	TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
	parent->num_children++;

	return 0;
}

int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	int rc = 0;
	struct nvme_request *parent, *child, *tmp;
	bool child_failed = false;
	int aborted = 0;

	if (cmd_cb_arg == NULL) {
		return -EINVAL;
	}

	nvme_ctrlr_lock(ctrlr);

	if (qpair == NULL) {
		qpair = ctrlr->adminq;
	}

	parent = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (parent == NULL) {
		nvme_ctrlr_unlock(ctrlr);

		return -ENOMEM;
	}

	TAILQ_INIT(&parent->children);
	parent->num_children = 0;

	parent->cmd.opc = SPDK_NVME_OPC_ABORT;
	memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl));

	/* Hold the SQID that the requests to abort are associated with.
	 * It will be copied to the children.
	 *
	 * CID is not set here because the parent is not submitted directly
	 * and CID is not determined until a request to abort is found.
	 */
	parent->cmd.cdw10_bits.abort.sqid = qpair->id;

	/* This is used to find the requests to abort. */
	parent->user_cb_arg = cmd_cb_arg;

	/* Add an abort request for each outstanding request which has cmd_cb_arg
	 * as its callback context.
	 */
	rc = nvme_transport_qpair_iterate_requests(qpair, nvme_request_add_abort, parent);
	if (rc != 0) {
		/* Free abort requests already added. */
		child_failed = true;
	}

	TAILQ_FOREACH_SAFE(child, &parent->children, child_tailq, tmp) {
		if (spdk_likely(!child_failed)) {
			rc = _nvme_ctrlr_submit_abort_request(ctrlr, child);
			if (spdk_unlikely(rc != 0)) {
				child_failed = true;
			}
		} else {
			/* Free remaining abort requests. */
			nvme_request_remove_child(parent, child);
			nvme_free_request(child);
		}
	}

	if (spdk_likely(!child_failed)) {
		/* There is no error so far. Abort requests were submitted successfully
		 * or there was no outstanding request to abort.
		 *
		 * Hence, next abort any queued requests which have cmd_cb_arg as
		 * their callback context.
		 */
		aborted = nvme_qpair_abort_queued_reqs_with_cbarg(qpair, cmd_cb_arg);
		if (parent->num_children == 0) {
			/* There was no outstanding request to abort. */
			if (aborted > 0) {
				/* The queued requests were successfully aborted. Hence
				 * complete the parent request with success synchronously.
				 */
				nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
						      parent, &parent->parent_status);
			} else {
				/* There was no queued request to abort. */
				rc = -ENOENT;
			}
		}
	} else {
		/* Failed to add or submit abort request. */
		if (parent->num_children != 0) {
			/* Return success since we must wait for those children
			 * to complete but set the parent request to failure.
			 */
			parent->parent_status.cdw0 |= 1U;
			rc = 0;
		}
	}

	if (rc != 0) {
		nvme_free_request(parent);
	}

	nvme_ctrlr_unlock(ctrlr);
	return rc;
}

int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
			 const struct spdk_nvme_fw_commit *fw_commit,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FIRMWARE_COMMIT;
	memcpy(&cmd->cdw10, fw_commit, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}

int
nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
				 uint32_t size, uint32_t offset, void *payload,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, size, cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD;
	cmd->cdw10 = spdk_nvme_bytes_to_numd(size);
	cmd->cdw11 = offset >> 2;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
				     uint16_t spsp, uint8_t nssf, void *payload,
				     uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SECURITY_RECEIVE;
	cmd->cdw10_bits.sec_send_recv.nssf = nssf;
	cmd->cdw10_bits.sec_send_recv.spsp0 = (uint8_t)spsp;
	cmd->cdw10_bits.sec_send_recv.spsp1 = (uint8_t)(spsp >> 8);
	cmd->cdw10_bits.sec_send_recv.secp = secp;
	cmd->cdw11 = payload_size;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
				  uint16_t spsp, uint8_t nssf, void *payload,
				  uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SECURITY_SEND;
	cmd->cdw10_bits.sec_send_recv.nssf = nssf;
	cmd->cdw10_bits.sec_send_recv.spsp0 = (uint8_t)spsp;
	cmd->cdw10_bits.sec_send_recv.spsp1 = (uint8_t)(spsp >> 8);
	cmd->cdw10_bits.sec_send_recv.secp = secp;
	cmd->cdw11 = payload_size;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}
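/*
 * Usage sketch: retrieving the list of supported security protocols, i.e.
 * security protocol 0x00 ("Security Protocol Information" in SPC, as
 * referenced by NVMe). recv_done, the buf allocation, and the 4 KiB length
 * are illustrative assumptions.
 *
 *	rc = spdk_nvme_ctrlr_cmd_security_receive(ctrlr, 0x00, 0, 0,
 *						  buf, 4096, recv_done, NULL);
 */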
int
nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			struct spdk_nvme_sanitize *sanitize, uint32_t cdw11,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SANITIZE;
	cmd->nsid = nsid;
	cmd->cdw11 = cdw11;
	memcpy(&cmd->cdw10, sanitize, sizeof(cmd->cdw10));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}

static int
nvme_ctrlr_cmd_directive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 uint32_t doper, uint32_t dtype, uint32_t dspec,
			 void *payload, uint32_t payload_size, uint32_t cdw12,
			 uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			 uint16_t opc_type, bool host_to_ctrlr)
{
	struct nvme_request *req = NULL;
	struct spdk_nvme_cmd *cmd = NULL;
	int rc;

	nvme_ctrlr_lock(ctrlr);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, host_to_ctrlr);
	if (req == NULL) {
		nvme_ctrlr_unlock(ctrlr);
		return -ENOMEM;
	}
	cmd = &req->cmd;
	cmd->opc = opc_type;
	cmd->nsid = nsid;

	if ((payload_size >> 2) > 0) {
		cmd->cdw10 = (payload_size >> 2) - 1;
	}
	cmd->cdw11_bits.directive.doper = doper;
	cmd->cdw11_bits.directive.dtype = dtype;
	cmd->cdw11_bits.directive.dspec = dspec;
	cmd->cdw12 = cdw12;
	cmd->cdw13 = cdw13;
	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_ctrlr_unlock(ctrlr);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_directive_send(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				   uint32_t doper, uint32_t dtype, uint32_t dspec,
				   void *payload, uint32_t payload_size, uint32_t cdw12,
				   uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_ctrlr_cmd_directive(ctrlr, nsid, doper, dtype, dspec,
					payload, payload_size, cdw12, cdw13, cb_fn, cb_arg,
					SPDK_NVME_OPC_DIRECTIVE_SEND, true);
}

int
spdk_nvme_ctrlr_cmd_directive_receive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				      uint32_t doper, uint32_t dtype, uint32_t dspec,
				      void *payload, uint32_t payload_size, uint32_t cdw12,
				      uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_ctrlr_cmd_directive(ctrlr, nsid, doper, dtype, dspec,
					payload, payload_size, cdw12, cdw13, cb_fn, cb_arg,
					SPDK_NVME_OPC_DIRECTIVE_RECEIVE, false);
}
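/*
 * Usage sketch: reading the Identify directive's Return Parameters page for
 * a namespace. directive_done and buf are illustrative, and the two enum
 * names are assumed to match the definitions in spdk/nvme_spec.h; verify
 * them against your headers.
 *
 *	rc = spdk_nvme_ctrlr_cmd_directive_receive(ctrlr, nsid,
 *			SPDK_NVME_IDENTIFY_DIRECTIVE_RECEIVE_RETURN_PARAM,
 *			SPDK_NVME_DIRECTIVE_TYPE_IDENTIFY, 0, buf, 4096,
 *			0, 0, directive_done, NULL);
 */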