/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "nvme_internal.h"

int
spdk_nvme_ctrlr_io_cmd_raw_no_payload_build(struct spdk_nvme_ctrlr *ctrlr,
                struct spdk_nvme_qpair *qpair,
                struct spdk_nvme_cmd *cmd,
                spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct nvme_payload payload;

        if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
                return -EINVAL;
        }

        memset(&payload, 0, sizeof(payload));
        req = nvme_allocate_request(qpair, &payload, 0, 0, cb_fn, cb_arg);

        if (req == NULL) {
                return -ENOMEM;
        }

        memcpy(&req->cmd, cmd, sizeof(req->cmd));

        return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
                struct spdk_nvme_qpair *qpair,
                struct spdk_nvme_cmd *cmd,
                void *buf, uint32_t len,
                spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;

        req = nvme_allocate_request_contig(qpair, buf, len, cb_fn, cb_arg);

        if (req == NULL) {
                return -ENOMEM;
        }

        memcpy(&req->cmd, cmd, sizeof(req->cmd));

        return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ctrlr_cmd_io_raw_with_md(struct spdk_nvme_ctrlr *ctrlr,
                struct spdk_nvme_qpair *qpair,
                struct spdk_nvme_cmd *cmd,
                void *buf, uint32_t len, void *md_buf,
                spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct nvme_payload payload;
        uint32_t md_len = 0;

        payload = NVME_PAYLOAD_CONTIG(buf, md_buf);

        /* Calculate metadata length */
        if (md_buf) {
                struct spdk_nvme_ns *ns = &ctrlr->ns[cmd->nsid - 1];

                assert(ns->sector_size != 0);
                md_len = len / ns->sector_size * ns->md_size;
        }

        req = nvme_allocate_request(qpair, &payload, len, md_len, cb_fn, cb_arg);
        if (req == NULL) {
                return -ENOMEM;
        }

        memcpy(&req->cmd, cmd, sizeof(req->cmd));

        return nvme_qpair_submit_request(qpair, req);
}
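
/*
 * Usage sketch (illustrative only, not part of the upstream file): how a
 * caller might drive spdk_nvme_ctrlr_cmd_io_raw() above. The command is
 * built entirely by the caller; per the NVMe Read command layout, cdw10 and
 * cdw11 carry the starting LBA and cdw12 the 0-based block count. The
 * function names, the 512-byte sector assumption, and the 4 KiB transfer
 * are hypothetical.
 */
static void
example_raw_read_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
        if (spdk_nvme_cpl_is_error(cpl)) {
                SPDK_ERRLOG("raw read failed: sct %d, sc %d\n",
                            cpl->status.sct, cpl->status.sc);
        }
}

static int
example_submit_raw_read(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
                        uint32_t nsid, uint64_t lba, void *buf)
{
        struct spdk_nvme_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opc = SPDK_NVME_OPC_READ;
        cmd.nsid = nsid;
        cmd.cdw10 = (uint32_t)lba;         /* SLBA, low 32 bits */
        cmd.cdw11 = (uint32_t)(lba >> 32); /* SLBA, high 32 bits */
        cmd.cdw12 = 7;                     /* NLB is 0-based: 8 x 512 B = 4 KiB */

        /* buf must be DMA-able (e.g. from spdk_dma_zmalloc()) and stay valid
         * until the completion callback runs. */
        return spdk_nvme_ctrlr_cmd_io_raw(ctrlr, qpair, &cmd, buf, 4096,
                                          example_raw_read_done, NULL);
}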

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
                struct spdk_nvme_cmd *cmd,
                void *buf, uint32_t len,
                spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        int rc;

        nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_contig(ctrlr->adminq, buf, len, cb_fn, cb_arg);
        if (req == NULL) {
                nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
                return -ENOMEM;
        }

        memcpy(&req->cmd, cmd, sizeof(req->cmd));

        rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

        nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
        return rc;
}

int
nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
                void *payload, size_t payload_size,
                spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;

        req = nvme_allocate_request_user_copy(ctrlr->adminq,
                        payload, payload_size,
                        cb_fn, cb_arg, false);
        if (req == NULL) {
                return -ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_IDENTIFY;
        cmd->cdw10_bits.identify.cns = cns;
        cmd->cdw10_bits.identify.cntid = cntid;
        cmd->nsid = nsid;

        return nvme_ctrlr_submit_admin_request(ctrlr, req);
}

int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
                struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;
        int rc;

        nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_user_copy(ctrlr->adminq,
                        payload, sizeof(struct spdk_nvme_ctrlr_list),
                        cb_fn, cb_arg, true);
        if (req == NULL) {
                nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
                return -ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
        cmd->nsid = nsid;
        cmd->cdw10_bits.ns_attach.sel = SPDK_NVME_NS_CTRLR_ATTACH;

        rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

        nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
        return rc;
}

int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
                struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;
        int rc;

        nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_user_copy(ctrlr->adminq,
                        payload, sizeof(struct spdk_nvme_ctrlr_list),
                        cb_fn, cb_arg, true);
        if (req == NULL) {
                nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
                return -ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
        cmd->nsid = nsid;
        cmd->cdw10_bits.ns_attach.sel = SPDK_NVME_NS_CTRLR_DETACH;

        rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

        nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
        return rc;
}
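
/*
 * Usage sketch (illustrative only): issuing an Identify Controller command
 * through spdk_nvme_ctrlr_cmd_admin_raw() above. CNS 0x01
 * (SPDK_NVME_IDENTIFY_CTRLR) selects the controller data structure, whose
 * size matches sizeof(struct spdk_nvme_ctrlr_data). Because admin_raw
 * references the buffer directly rather than copying it, cdata must remain
 * valid (and be DMA-safe on PCIe transports) until cb_fn runs. The function
 * name is hypothetical.
 */
static int
example_identify_ctrlr_raw(struct spdk_nvme_ctrlr *ctrlr,
                           struct spdk_nvme_ctrlr_data *cdata,
                           spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct spdk_nvme_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opc = SPDK_NVME_OPC_IDENTIFY;
        cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_CTRLR;

        return spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, cdata,
                                             sizeof(*cdata), cb_fn, cb_arg);
}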

int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
                spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;
        int rc;

        nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_user_copy(ctrlr->adminq,
                        payload, sizeof(struct spdk_nvme_ns_data),
                        cb_fn, cb_arg, true);
        if (req == NULL) {
                nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
                return -ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
        cmd->cdw10_bits.ns_manage.sel = SPDK_NVME_NS_MANAGEMENT_CREATE;

        rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

        nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
        return rc;
}

int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
                void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;
        int rc;

        nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
        if (req == NULL) {
                nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
                return -ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
        cmd->cdw10_bits.ns_manage.sel = SPDK_NVME_NS_MANAGEMENT_DELETE;
        cmd->nsid = nsid;

        rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

        nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
        return rc;
}

int
nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
                spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;
        int rc;

        nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
        if (req == NULL) {
                nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
                return -ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG;
        cmd->dptr.prp.prp1 = prp1;
        cmd->dptr.prp.prp2 = prp2;

        rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

        nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
        return rc;
}

int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
                spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;
        int rc;

        nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
        if (req == NULL) {
                nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
                return -ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_FORMAT_NVM;
        cmd->nsid = nsid;
        memcpy(&cmd->cdw10, format, sizeof(uint32_t));

        rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
        nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

        return rc;
}

int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
                uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
                spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;
        int rc;

        nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
                        true);
        if (req == NULL) {
                nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
                return -ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
        cmd->cdw10_bits.set_features.fid = feature;
        cmd->cdw11 = cdw11;
        cmd->cdw12 = cdw12;

        rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
        nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

        return rc;
}
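
/*
 * Usage sketch (illustrative, not part of the library): setting the
 * Temperature Threshold feature (FID 0x04) via
 * spdk_nvme_ctrlr_cmd_set_feature() above. cdw11 carries the
 * feature-specific value; for this feature the threshold occupies bits 15:0
 * and is expressed in Kelvin. No data buffer is needed, so payload is NULL.
 * The function name is a hypothetical assumption.
 */
static int
example_set_temp_threshold(struct spdk_nvme_ctrlr *ctrlr, uint16_t kelvin,
                           spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        union spdk_nvme_feat_temperature_threshold tt;

        tt.raw = 0;
        tt.bits.tmpth = kelvin; /* tmpsel/thsel left 0: composite over-temp threshold */

        return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD,
                                               tt.raw, 0, NULL, 0, cb_fn, cb_arg);
}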

int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
                uint32_t cdw11, void *payload, uint32_t payload_size,
                spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;
        int rc;

        nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
                        false);
        if (req == NULL) {
                nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
                return -ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
        cmd->cdw10_bits.get_features.fid = feature;
        cmd->cdw11 = cdw11;

        rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
        nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

        return rc;
}

int
spdk_nvme_ctrlr_cmd_get_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
                uint32_t cdw11, void *payload,
                uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
                void *cb_arg, uint32_t ns_id)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;
        int rc;

        nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
                        false);
        if (req == NULL) {
                nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
                return -ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
        cmd->cdw10_bits.get_features.fid = feature;
        cmd->cdw11 = cdw11;
        cmd->nsid = ns_id;

        rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
        nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

        return rc;
}

int
spdk_nvme_ctrlr_cmd_set_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
                uint32_t cdw11, uint32_t cdw12, void *payload,
                uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
                void *cb_arg, uint32_t ns_id)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;
        int rc;

        nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
                        true);
        if (req == NULL) {
                nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
                return -ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
        cmd->cdw10_bits.set_features.fid = feature;
        cmd->cdw11 = cdw11;
        cmd->cdw12 = cdw12;
        cmd->nsid = ns_id;

        rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
        nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

        return rc;
}

int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
                uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        union spdk_nvme_feat_number_of_queues feat_num_queues;

        feat_num_queues.raw = 0;
        feat_num_queues.bits.nsqr = num_queues - 1;
        feat_num_queues.bits.ncqr = num_queues - 1;

        return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES,
                        feat_num_queues.raw, 0,
                        NULL, 0, cb_fn, cb_arg);
}

int
nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
                spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        return spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, 0, NULL, 0,
                        cb_fn, cb_arg);
}

int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
                union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
                void *cb_arg)
{
        uint32_t cdw11;

        cdw11 = config.raw;
        return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION,
                        cdw11, 0, NULL, 0,
                        cb_fn, cb_arg);
}
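
/*
 * Usage sketch: consuming the result of nvme_ctrlr_cmd_get_num_queues()
 * above. Get Features returns its value in cdw0 of the completion; for
 * Number of Queues that is NSQA/NCQA, the 0-based counts of allocated
 * submission and completion queues, laid out like the NSQR/NCQR fields used
 * in the set path. The callback name is hypothetical.
 */
static void
example_get_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
{
        union spdk_nvme_feat_number_of_queues result;

        if (spdk_nvme_cpl_is_error(cpl)) {
                SPDK_ERRLOG("get number of queues failed\n");
                return;
        }

        result.raw = cpl->cdw0;
        /* +1 because the returned counts are 0-based. */
        SPDK_NOTICELOG("allocated %u submission / %u completion queues\n",
                       result.bits.nsqr + 1, result.bits.ncqr + 1);
}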

int
nvme_ctrlr_cmd_set_host_id(struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
                spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        union spdk_nvme_feat_host_identifier feat_host_identifier;

        feat_host_identifier.raw = 0;
        if (host_id_size == 16) {
                /* 128-bit extended host identifier */
                feat_host_identifier.bits.exhid = 1;
        } else if (host_id_size == 8) {
                /* 64-bit host identifier */
                feat_host_identifier.bits.exhid = 0;
        } else {
                SPDK_ERRLOG("Invalid host ID size %u\n", host_id_size);
                return -EINVAL;
        }

        return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_HOST_IDENTIFIER,
                        feat_host_identifier.raw, 0,
                        host_id, host_id_size, cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_get_log_page_ext(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
                uint32_t nsid, void *payload, uint32_t payload_size,
                uint64_t offset, uint32_t cdw10,
                uint32_t cdw11, uint32_t cdw14,
                spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;
        uint32_t numd, numdl, numdu;
        uint32_t lpol, lpou;
        int rc;

        if (payload_size == 0) {
                return -EINVAL;
        }

        if (offset & 3) {
                return -EINVAL;
        }

        numd = payload_size / sizeof(uint32_t) - 1u;
        numdl = numd & 0xFFFFu;
        numdu = (numd >> 16) & 0xFFFFu;

        lpol = (uint32_t)offset;
        lpou = (uint32_t)(offset >> 32);

        nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

        if (offset && !ctrlr->cdata.lpa.edlp) {
                nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
                return -EINVAL;
        }

        req = nvme_allocate_request_user_copy(ctrlr->adminq,
                        payload, payload_size, cb_fn, cb_arg, false);
        if (req == NULL) {
                nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
                return -ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_GET_LOG_PAGE;
        cmd->nsid = nsid;
        cmd->cdw10 = cdw10;
        cmd->cdw10_bits.get_log_page.numdl = numdl;
        cmd->cdw10_bits.get_log_page.lid = log_page;

        cmd->cdw11 = cdw11;
        cmd->cdw11_bits.get_log_page.numdu = numdu;
        cmd->cdw12 = lpol;
        cmd->cdw13 = lpou;
        cmd->cdw14 = cdw14;

        rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
        nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

        return rc;
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
                uint32_t nsid, void *payload, uint32_t payload_size,
                uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        return spdk_nvme_ctrlr_cmd_get_log_page_ext(ctrlr, log_page, nsid, payload,
                        payload_size, offset, 0, 0, 0, cb_fn, cb_arg);
}
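
/*
 * Usage sketch (illustrative only): fetching the SMART / Health Information
 * log page with spdk_nvme_ctrlr_cmd_get_log_page() above. The global
 * namespace tag requests the controller-wide page. Offset 0 works on every
 * controller; non-zero offsets require the extended-data LPA capability
 * checked in spdk_nvme_ctrlr_cmd_get_log_page_ext(). The function name is
 * hypothetical.
 */
static int
example_get_health_log(struct spdk_nvme_ctrlr *ctrlr,
                       struct spdk_nvme_health_information_page *page,
                       spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        return spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION,
                                                SPDK_NVME_GLOBAL_NS_TAG, page,
                                                sizeof(*page), 0, cb_fn, cb_arg);
}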

static void
spdk_nvme_ctrlr_cmd_abort_cpl(void *ctx, const struct spdk_nvme_cpl *cpl)
{
        struct nvme_request *req, *next, *tmp;
        struct spdk_nvme_ctrlr *ctrlr;
        int rc;

        req = ctx;
        ctrlr = (struct spdk_nvme_ctrlr *)req->user_buffer;

        ctrlr->outstanding_aborts--;
        STAILQ_FOREACH_SAFE(next, &ctrlr->queued_aborts, stailq, tmp) {
                STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
                ctrlr->outstanding_aborts++;
                rc = nvme_ctrlr_submit_admin_request(ctrlr, next);
                if (rc < 0) {
                        SPDK_ERRLOG("Failed to submit queued abort.\n");
                        memset(&next->cpl, 0, sizeof(next->cpl));
                        next->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
                        next->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
                        next->cpl.status.dnr = 1;
                        /* Complete the queued abort with the synthetic error
                         * status built above, not the status of the abort
                         * that just finished. */
                        nvme_complete_request(next->cb_fn, next->cb_arg, next->qpair, next,
                                        &next->cpl);
                        nvme_free_request(next);
                } else {
                        /* If the first abort succeeds, stop iterating. */
                        break;
                }
        }

        req->user_cb_fn(req->user_cb_arg, cpl);
}

int
spdk_nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
                uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        int rc;
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;
        uint16_t sqid;

        if (qpair) {
                sqid = qpair->id;
        } else {
                sqid = ctrlr->adminq->id; /* 0 */
        }

        nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_null(ctrlr->adminq, spdk_nvme_ctrlr_cmd_abort_cpl, NULL);
        if (req == NULL) {
                nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
                return -ENOMEM;
        }
        req->cb_arg = req;
        req->user_cb_fn = cb_fn;
        req->user_cb_arg = cb_arg;
        req->user_buffer = ctrlr; /* This is a hack to get to the ctrlr in the
                                   * completion handler. */

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_ABORT;
        cmd->cdw10_bits.abort.sqid = sqid;
        cmd->cdw10_bits.abort.cid = cid;

        if (ctrlr->outstanding_aborts >= ctrlr->cdata.acl) {
                STAILQ_INSERT_TAIL(&ctrlr->queued_aborts, req, stailq);
                rc = 0;
        } else {
                ctrlr->outstanding_aborts++;
                rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
        }

        nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
        return rc;
}

int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
                const struct spdk_nvme_fw_commit *fw_commit,
                spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;
        int rc;

        nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
        if (req == NULL) {
                nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
                return -ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_FIRMWARE_COMMIT;
        memcpy(&cmd->cdw10, fw_commit, sizeof(uint32_t));

        rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
        nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

        return rc;
}

int
nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
                uint32_t size, uint32_t offset, void *payload,
                spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;
        int rc;

        nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, size, cb_fn, cb_arg, true);
        if (req == NULL) {
                nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
                return -ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD;
        cmd->cdw10 = (size >> 2) - 1;
        cmd->cdw11 = offset >> 2;

        rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
        nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

        return rc;
}
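
/*
 * Usage sketch: how nvme_ctrlr_cmd_fw_image_download()'s cdw10/cdw11
 * encoding is meant to be driven. Both fields are dword-granular: cdw10 is
 * the 0-based dword count (NUMD) and cdw11 the dword offset (OFST), so every
 * chunk's size and offset must be 4-byte aligned. A real caller would also
 * poll the admin queue for each completion before sending the next chunk;
 * that part is omitted here. The 4 KiB chunk size and function name are
 * assumptions.
 */
static int
example_download_fw_image(struct spdk_nvme_ctrlr *ctrlr, void *image, uint32_t image_size,
                          spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        uint32_t offset = 0;
        uint32_t chunk;
        int rc;

        while (offset < image_size) {
                chunk = image_size - offset;
                if (chunk > 4096) {
                        chunk = 4096;
                }
                rc = nvme_ctrlr_cmd_fw_image_download(ctrlr, chunk, offset,
                                                      (uint8_t *)image + offset,
                                                      cb_fn, cb_arg);
                if (rc != 0) {
                        return rc;
                }
                offset += chunk;
        }

        return 0;
}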

int
spdk_nvme_ctrlr_cmd_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
                uint16_t spsp, uint8_t nssf, void *payload,
                uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;
        int rc;

        nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
                        cb_fn, cb_arg, false);
        if (req == NULL) {
                nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
                return -ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_SECURITY_RECEIVE;
        cmd->cdw10_bits.sec_send_recv.nssf = nssf;
        cmd->cdw10_bits.sec_send_recv.spsp0 = (uint8_t)spsp;
        cmd->cdw10_bits.sec_send_recv.spsp1 = (uint8_t)(spsp >> 8);
        cmd->cdw10_bits.sec_send_recv.secp = secp;
        cmd->cdw11 = payload_size;

        rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
        nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

        return rc;
}

int
spdk_nvme_ctrlr_cmd_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
                uint16_t spsp, uint8_t nssf, void *payload,
                uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;
        int rc;

        nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
                        cb_fn, cb_arg, true);
        if (req == NULL) {
                nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
                return -ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_SECURITY_SEND;
        cmd->cdw10_bits.sec_send_recv.nssf = nssf;
        cmd->cdw10_bits.sec_send_recv.spsp0 = (uint8_t)spsp;
        cmd->cdw10_bits.sec_send_recv.spsp1 = (uint8_t)(spsp >> 8);
        cmd->cdw10_bits.sec_send_recv.secp = secp;
        cmd->cdw11 = payload_size;

        rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
        nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

        return rc;
}

int
nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
                struct spdk_nvme_sanitize *sanitize, uint32_t cdw11,
                spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_request *req;
        struct spdk_nvme_cmd *cmd;
        int rc;

        nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
        req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
        if (req == NULL) {
                nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
                return -ENOMEM;
        }

        cmd = &req->cmd;
        cmd->opc = SPDK_NVME_OPC_SANITIZE;
        cmd->nsid = nsid;
        cmd->cdw11 = cdw11;
        memcpy(&cmd->cdw10, sanitize, sizeof(cmd->cdw10));

        rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
        nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

        return rc;
}
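
/*
 * Usage sketch (illustrative only): starting a block-erase sanitize through
 * nvme_ctrlr_cmd_sanitize() above. The spdk_nvme_sanitize structure is
 * copied into cdw10, so the action goes there; cdw11 carries the overwrite
 * pattern and is unused for block erase. Sanitize operates subsystem-wide,
 * so the global namespace tag here and the function name are assumptions.
 */
static int
example_start_block_erase(struct spdk_nvme_ctrlr *ctrlr,
                          spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct spdk_nvme_sanitize sanitize;

        memset(&sanitize, 0, sizeof(sanitize));
        sanitize.sanact = SPDK_NVME_SANITIZE_BLOCK_ERASE;

        return nvme_ctrlr_cmd_sanitize(ctrlr, SPDK_NVME_GLOBAL_NS_TAG, &sanitize,
                                       0, cb_fn, cb_arg);
}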