/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "nvme_internal.h"

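/*
 * Submit an arbitrary, caller-built NVMe command on the given I/O qpair,
 * using a single contiguous data buffer of len bytes.
 */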
int
spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
			   struct spdk_nvme_qpair *qpair,
			   struct spdk_nvme_cmd *cmd,
			   void *buf, uint32_t len,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request	*req;

	req = nvme_allocate_request_contig(qpair, buf, len, cb_fn, cb_arg);

	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

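/*
 * Same as spdk_nvme_ctrlr_cmd_io_raw(), but with a separate metadata buffer
 * attached to the payload.
 */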
int
spdk_nvme_ctrlr_cmd_io_raw_with_md(struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_nvme_qpair *qpair,
				   struct spdk_nvme_cmd *cmd,
				   void *buf, uint32_t len, void *md_buf,
				   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_payload payload;

	payload = NVME_PAYLOAD_CONTIG(buf, md_buf);

	req = nvme_allocate_request(qpair, &payload, len, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

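/*
 * Submit an arbitrary, caller-built admin command. The controller lock is
 * held across allocation and submission to serialize admin queue access.
 */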
int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd,
			      void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	int			rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_contig(ctrlr->adminq, buf, len, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

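/*
 * Identify: CNS occupies CDW10 bits 7:0 and CNTID bits 31:16. The data is
 * copied back into the caller's buffer on completion.
 */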
int
nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
			void *payload, size_t payload_size,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size,
					      cb_fn, cb_arg, false);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_IDENTIFY;
	cmd->cdw10 = cns | ((uint32_t)cntid << 16);
	cmd->nsid = nsid;

	return nvme_ctrlr_submit_admin_request(ctrlr, req);
}

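/* Namespace Attachment (attach): payload lists the controllers to attach nsid to. */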
int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
	cmd->nsid = nsid;
	cmd->cdw10 = SPDK_NVME_NS_CTRLR_ATTACH;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

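/* Namespace Attachment (detach): payload lists the controllers to detach nsid from. */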
int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
	cmd->nsid = nsid;
	cmd->cdw10 = SPDK_NVME_NS_CTRLR_DETACH;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

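/* Namespace Management (create): the new namespace's parameters are passed in payload. */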
int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ns_data),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
	cmd->cdw10 = SPDK_NVME_NS_MANAGEMENT_CREATE;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

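/* Namespace Management (delete): no data payload; the target namespace is selected by nsid. */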
int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
			 void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
	cmd->cdw10 = SPDK_NVME_NS_MANAGEMENT_DELETE;
	cmd->nsid = nsid;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

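/*
 * Doorbell Buffer Config: prp1 and prp2 point to the shadow doorbell and
 * EventIdx buffers, respectively.
 */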
int
nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG;
	cmd->dptr.prp.prp1 = prp1;
	cmd->dptr.prp.prp2 = prp2;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

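/* Format NVM: the format attributes (LBA format, secure erase setting, etc.) occupy CDW10. */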
int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FORMAT_NVM;
	cmd->nsid = nsid;
	memcpy(&cmd->cdw10, format, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

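/*
 * Set Features: feature identifier in CDW10, feature-specific values in
 * CDW11/CDW12, with an optional data payload.
 */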
int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd->cdw10 = feature;
	cmd->cdw11 = cdw11;
	cmd->cdw12 = cdw12;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

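/* Get Features: feature identifier in CDW10, feature-specific value in CDW11. */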
int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd->cdw10 = feature;
	cmd->cdw11 = cdw11;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

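/* Get Features, scoped to the namespace given by ns_id. */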
int
spdk_nvme_ctrlr_cmd_get_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				   uint32_t cdw11, void *payload,
				   uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
				   void *cb_arg, uint32_t ns_id)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd->cdw10 = feature;
	cmd->cdw11 = cdw11;
	cmd->nsid = ns_id;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

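/* Set Features, scoped to the namespace given by ns_id. */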
int spdk_nvme_ctrlr_cmd_set_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				       uint32_t cdw11, uint32_t cdw12, void *payload,
				       uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
				       void *cb_arg, uint32_t ns_id)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd->cdw10 = feature;
	cmd->cdw11 = cdw11;
	cmd->cdw12 = cdw12;
	cmd->nsid = ns_id;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

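/*
 * Number of Queues (set): NSQR and NCQR in CDW11 are 0's-based values, hence
 * the num_queues - 1 encoding.
 */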
int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	uint32_t cdw11;

	cdw11 = ((num_queues - 1) << 16) | (num_queues - 1);
	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, cdw11, 0,
					       NULL, 0, cb_fn, cb_arg);
}

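/* Number of Queues (get): query how many I/O queues the controller has allocated. */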
int
nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, 0, NULL, 0,
					       cb_fn, cb_arg);
}

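/* Configure which asynchronous event types the controller is allowed to report. */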
int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
				      union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
				      void *cb_arg)
{
	uint32_t cdw11;

	cdw11 = config.raw;
	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, 0,
					       NULL, 0,
					       cb_fn, cb_arg);
}

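/*
 * Host Identifier (set): CDW11 bit 0 selects the 128-bit extended format;
 * only 8-byte and 16-byte host IDs are valid.
 */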
int
nvme_ctrlr_cmd_set_host_id(struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	uint32_t cdw11;

	if (host_id_size == 16) {
		/* 128-bit extended host identifier */
		cdw11 = 1;
	} else if (host_id_size == 8) {
		/* 64-bit host identifier */
		cdw11 = 0;
	} else {
		SPDK_ERRLOG("Invalid host ID size %u\n", host_id_size);
		return -EINVAL;
	}

	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_HOST_IDENTIFIER, cdw11, 0,
					       host_id, host_id_size, cb_fn, cb_arg);
}

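/*
 * Get Log Page: NUMD is split across CDW10/CDW11 and is 0's-based. A nonzero
 * offset must be dword-aligned and requires extended-data support (LPA.EDLP).
 */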
int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				 uint32_t nsid, void *payload, uint32_t payload_size,
				 uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	uint32_t numd, numdl, numdu;
	uint32_t lpol, lpou;
	int rc;

	if (payload_size == 0) {
		return -EINVAL;
	}

	if (offset & 3) {
		return -EINVAL;
	}

	numd = payload_size / sizeof(uint32_t) - 1u;
	numdl = numd & 0xFFFFu;
	numdu = (numd >> 16) & 0xFFFFu;

	lpol = (uint32_t)offset;
	lpou = (uint32_t)(offset >> 32);

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	if (offset && !ctrlr->cdata.lpa.edlp) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size, cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd->nsid = nsid;
	cmd->cdw10 = numdl << 16;
	cmd->cdw10 |= log_page;
	cmd->cdw11 = numdu;
	cmd->cdw12 = lpol;
	cmd->cdw13 = lpou;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

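/*
 * Completion handler for Abort. Resubmits at most one queued abort; any
 * queued abort that fails to submit is completed with an internal device
 * error before the caller's callback is invoked.
 */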
static void
spdk_nvme_ctrlr_cmd_abort_cpl(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_request	*req, *next, *tmp;
	struct spdk_nvme_ctrlr	*ctrlr;
	int			rc;

	req = ctx;
	ctrlr = (struct spdk_nvme_ctrlr *)req->user_buffer;

	ctrlr->outstanding_aborts--;
	STAILQ_FOREACH_SAFE(next, &ctrlr->queued_aborts, stailq, tmp) {
		STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
		ctrlr->outstanding_aborts++;
		rc = nvme_ctrlr_submit_admin_request(ctrlr, next);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to submit queued abort.\n");
			memset(&next->cpl, 0, sizeof(next->cpl));
			next->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
			next->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			next->cpl.status.dnr = 1;
			nvme_complete_request(next, &next->cpl);
			nvme_free_request(next);
		} else {
			/* A queued abort was submitted successfully; stop iterating. */
			break;
		}
	}

	req->user_cb_fn(req->user_cb_arg, cpl);
}

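/*
 * Abort the command identified by cid on the submission queue identified by
 * qpair (NULL selects the admin queue). Requests beyond the controller's
 * advertised Abort Command Limit are queued until earlier aborts complete.
 */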
int
spdk_nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			  uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	int rc;
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	uint16_t sqid;

	if (qpair) {
		sqid = qpair->id;
	} else {
		sqid = ctrlr->adminq->id; /* 0 */
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, spdk_nvme_ctrlr_cmd_abort_cpl, NULL);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}
	req->cb_arg = req;
	req->user_cb_fn = cb_fn;
	req->user_cb_arg = cb_arg;
	req->user_buffer = ctrlr; /* This is a hack to get to the ctrlr in the
				   * completion handler. */

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_ABORT;
	cmd->cdw10 = (cid << 16) | sqid;

	if (ctrlr->outstanding_aborts >= ctrlr->cdata.acl) {
		STAILQ_INSERT_TAIL(&ctrlr->queued_aborts, req, stailq);
		rc = 0;
	} else {
		ctrlr->outstanding_aborts++;
		rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

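/* Firmware Commit: the commit action and firmware slot occupy CDW10. */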
int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
			 const struct spdk_nvme_fw_commit *fw_commit,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FIRMWARE_COMMIT;
	memcpy(&cmd->cdw10, fw_commit, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

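/*
 * Firmware Image Download: NUMD (CDW10, 0's-based) and OFST (CDW11) are both
 * expressed in dwords.
 */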
int
nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
				 uint32_t size, uint32_t offset, void *payload,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, size, cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD;
	cmd->cdw10 = (size >> 2) - 1;
	cmd->cdw11 = offset >> 2;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

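/*
 * Security Receive: SECP, SPSP and NSSF are packed into CDW10; the allocation
 * length goes in CDW11.
 */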
int
spdk_nvme_ctrlr_cmd_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
				     uint16_t spsp, uint8_t nssf, void *payload,
				     uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SECURITY_RECEIVE;
	cmd->cdw10 = ((uint32_t)secp << 24) | ((uint32_t)spsp << 8) | ((uint32_t)nssf);
	cmd->cdw11 = payload_size;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

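/*
 * Security Send: SECP, SPSP and NSSF are packed into CDW10; the transfer
 * length goes in CDW11.
 */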
int
spdk_nvme_ctrlr_cmd_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
				  uint16_t spsp, uint8_t nssf, void *payload,
				  uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SECURITY_SEND;
	cmd->cdw10 = ((uint32_t)secp << 24) | ((uint32_t)spsp << 8) | ((uint32_t)nssf);
	cmd->cdw11 = payload_size;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}