/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 */

#include "nvme_internal.h"

int
spdk_nvme_ctrlr_io_cmd_raw_no_payload_build(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cmd *cmd,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_payload payload;

	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
		return -EINVAL;
	}

	memset(&payload, 0, sizeof(payload));
	req = nvme_allocate_request(qpair, &payload, 0, 0, cb_fn, cb_arg);

	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}
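
/*
 * Usage sketch (illustrative, not part of the upstream file): submitting a
 * payload-free vendor-specific I/O command on a PCIe controller.  The opcode
 * 0xC0 and the vendor_cmd_done callback are assumptions for the example.
 *
 *	static void vendor_cmd_done(void *arg, const struct spdk_nvme_cpl *cpl)
 *	{
 *		if (spdk_nvme_cpl_is_error(cpl)) {
 *			fprintf(stderr, "vendor command failed\n");
 *		}
 *	}
 *
 *	struct spdk_nvme_cmd cmd = {};
 *
 *	cmd.opc = 0xC0;		// hypothetical vendor-specific opcode
 *	cmd.nsid = 1;
 *	spdk_nvme_ctrlr_io_cmd_raw_no_payload_build(ctrlr, qpair, &cmd,
 *						    vendor_cmd_done, NULL);
 *	spdk_nvme_qpair_process_completions(qpair, 0);
 */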

int
spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
			   struct spdk_nvme_qpair *qpair,
			   struct spdk_nvme_cmd *cmd,
			   void *buf, uint32_t len,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request	*req;

	req = nvme_allocate_request_contig(qpair, buf, len, cb_fn, cb_arg);

	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}
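
/*
 * Usage sketch (illustrative): issuing a 4 KiB Read through the raw I/O path
 * instead of spdk_nvme_ns_cmd_read().  The buffer is assumed to be DMA-safe
 * (e.g. from spdk_zmalloc()), the namespace formatted with 512-byte blocks,
 * and io_done an assumed completion callback.
 *
 *	struct spdk_nvme_cmd cmd = {};
 *	uint64_t slba = 0;		// starting LBA
 *	uint32_t nlb = 8;		// 8 x 512 B = 4 KiB
 *
 *	cmd.opc = SPDK_NVME_OPC_READ;
 *	cmd.nsid = 1;
 *	cmd.cdw10 = (uint32_t)slba;
 *	cmd.cdw11 = (uint32_t)(slba >> 32);
 *	cmd.cdw12 = nlb - 1;		// 0's based number of blocks
 *	spdk_nvme_ctrlr_cmd_io_raw(ctrlr, qpair, &cmd, buf, 4096, io_done, NULL);
 */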

int
spdk_nvme_ctrlr_cmd_io_raw_with_md(struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_nvme_qpair *qpair,
				   struct spdk_nvme_cmd *cmd,
				   void *buf, uint32_t len, void *md_buf,
				   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	uint32_t md_len = 0;

	payload = NVME_PAYLOAD_CONTIG(buf, md_buf);

	/* Calculate metadata length */
	if (md_buf) {
		struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, cmd->nsid);

		assert(ns != NULL);
		assert(ns->sector_size != 0);
		md_len = len / ns->sector_size * ns->md_size;
	}

	req = nvme_allocate_request(qpair, &payload, len, md_len, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd,
			      void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	int			rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_contig(ctrlr->adminq, buf, len, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}
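
/*
 * Usage sketch (illustrative): fetching the 4 KiB Identify Controller data
 * structure through the raw admin path and polling for its completion.  The
 * "done" flag convention and identify_done are assumptions of the example.
 *
 *	static volatile bool done;
 *
 *	static void identify_done(void *arg, const struct spdk_nvme_cpl *cpl)
 *	{
 *		done = true;
 *	}
 *
 *	struct spdk_nvme_ctrlr_data *cdata;
 *	struct spdk_nvme_cmd cmd = {};
 *
 *	cdata = spdk_zmalloc(sizeof(*cdata), 4096, NULL,
 *			     SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
 *	cmd.opc = SPDK_NVME_OPC_IDENTIFY;
 *	cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_CTRLR;
 *	spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, cdata, sizeof(*cdata),
 *				      identify_done, NULL);
 *	while (!done) {
 *		spdk_nvme_ctrlr_process_admin_completions(ctrlr);
 *	}
 */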

int
nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
			uint8_t csi, void *payload, size_t payload_size,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size,
					      cb_fn, cb_arg, false);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_IDENTIFY;
	cmd->cdw10_bits.identify.cns = cns;
	cmd->cdw10_bits.identify.cntid = cntid;
	cmd->cdw11_bits.identify.csi = csi;
	cmd->nsid = nsid;

	return nvme_ctrlr_submit_admin_request(ctrlr, req);
}

int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
	cmd->nsid = nsid;
	cmd->cdw10_bits.ns_attach.sel = SPDK_NVME_NS_CTRLR_ATTACH;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
	cmd->nsid = nsid;
	cmd->cdw10_bits.ns_attach.sel = SPDK_NVME_NS_CTRLR_DETACH;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ns_data),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
	cmd->cdw10_bits.ns_manage.sel = SPDK_NVME_NS_MANAGEMENT_CREATE;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
			 void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
	cmd->cdw10_bits.ns_manage.sel = SPDK_NVME_NS_MANAGEMENT_DELETE;
	cmd->nsid = nsid;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}
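
/*
 * Usage sketch (illustrative): the public wrappers spdk_nvme_ctrlr_create_ns(),
 * spdk_nvme_ctrlr_attach_ns() and spdk_nvme_ctrlr_delete_ns() drive the
 * internal helpers above synchronously.  The sizes below are example values.
 *
 *	struct spdk_nvme_ns_data nsdata = {};
 *	struct spdk_nvme_ctrlr_list ctrlr_list = {};
 *	const struct spdk_nvme_ctrlr_data *cdata = spdk_nvme_ctrlr_get_data(ctrlr);
 *	uint32_t nsid;
 *
 *	nsdata.nsze = 1024 * 1024;	// namespace size in blocks
 *	nsdata.ncap = 1024 * 1024;	// namespace capacity in blocks
 *	nsdata.flbas.format = 0;	// LBA format #0
 *	nsid = spdk_nvme_ctrlr_create_ns(ctrlr, &nsdata);
 *
 *	ctrlr_list.ctrlr_count = 1;
 *	ctrlr_list.ctrlr_list[0] = cdata->cntlid;
 *	spdk_nvme_ctrlr_attach_ns(ctrlr, nsid, &ctrlr_list);
 */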

int
nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG;
	cmd->dptr.prp.prp1 = prp1;
	cmd->dptr.prp.prp2 = prp2;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FORMAT_NVM;
	cmd->nsid = nsid;
	memcpy(&cmd->cdw10, format, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}
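
/*
 * Usage sketch (illustrative): the public, synchronous wrapper
 * spdk_nvme_ctrlr_format() builds on nvme_ctrlr_cmd_format() above.
 * Formatting destroys all data on the namespace.
 *
 *	struct spdk_nvme_format format = {};
 *
 *	format.lbaf = 0;	// target LBA format
 *	format.ses = SPDK_NVME_FMT_NVM_SES_NO_SECURE_ERASE;
 *	if (spdk_nvme_ctrlr_format(ctrlr, 1, &format) != 0) {
 *		fprintf(stderr, "format failed\n");
 *	}
 */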

int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd->cdw10_bits.set_features.fid = feature;
	cmd->cdw11 = cdw11;
	cmd->cdw12 = cdw12;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}
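
/*
 * Usage sketch (illustrative): raising the composite temperature threshold
 * with Set Features.  The threshold is in kelvins; 353 K is an example value
 * and set_feature_done is an assumed callback.
 *
 *	union spdk_nvme_feat_temperature_threshold tt;
 *
 *	tt.raw = 0;
 *	tt.bits.tmpth = 353;
 *	spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD,
 *					tt.raw, 0, NULL, 0, set_feature_done, NULL);
 */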

int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd->cdw10_bits.get_features.fid = feature;
	cmd->cdw11 = cdw11;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_get_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				   uint32_t cdw11, void *payload,
				   uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
				   void *cb_arg, uint32_t ns_id)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd->cdw10_bits.get_features.fid = feature;
	cmd->cdw11 = cdw11;
	cmd->nsid = ns_id;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_set_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				   uint32_t cdw11, uint32_t cdw12, void *payload,
				   uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
				   void *cb_arg, uint32_t ns_id)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd->cdw10_bits.set_features.fid = feature;
	cmd->cdw11 = cdw11;
	cmd->cdw12 = cdw12;
	cmd->nsid = ns_id;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	union spdk_nvme_feat_number_of_queues feat_num_queues;

	feat_num_queues.raw = 0;
	feat_num_queues.bits.nsqr = num_queues - 1;
	feat_num_queues.bits.ncqr = num_queues - 1;

	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES,
					       feat_num_queues.raw, 0, NULL, 0, cb_fn, cb_arg);
}

int
nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, 0, NULL, 0,
					       cb_fn, cb_arg);
}
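
/*
 * Usage sketch (illustrative): NSQR/NCQR above are 0's based, and the granted
 * queue counts come back the same way in CDW0 of the completion.  A callback
 * might decode them like this (the union reuse for the completion layout is
 * an assumption of the example):
 *
 *	static void num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
 *	{
 *		union spdk_nvme_feat_number_of_queues granted;
 *
 *		if (spdk_nvme_cpl_is_error(cpl)) {
 *			return;
 *		}
 *		granted.raw = cpl->cdw0;
 *		printf("granted %u SQs / %u CQs\n",
 *		       granted.bits.nsqr + 1, granted.bits.ncqr + 1);
 *	}
 */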

int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
				      union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
				      void *cb_arg)
{
	uint32_t cdw11;

	cdw11 = config.raw;
	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION,
					       cdw11, 0, NULL, 0, cb_fn, cb_arg);
}

int
nvme_ctrlr_cmd_set_host_id(struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	union spdk_nvme_feat_host_identifier feat_host_identifier;

	feat_host_identifier.raw = 0;
	if (host_id_size == 16) {
		/* 128-bit extended host identifier */
		feat_host_identifier.bits.exhid = 1;
	} else if (host_id_size == 8) {
		/* 64-bit host identifier */
		feat_host_identifier.bits.exhid = 0;
	} else {
		SPDK_ERRLOG("Invalid host ID size %u\n", host_id_size);
		return -EINVAL;
	}

	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_HOST_IDENTIFIER,
					       feat_host_identifier.raw, 0,
					       host_id, host_id_size, cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_get_log_page_ext(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				     uint32_t nsid, void *payload, uint32_t payload_size,
				     uint64_t offset, uint32_t cdw10,
				     uint32_t cdw11, uint32_t cdw14,
				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	uint32_t numd, numdl, numdu;
	uint32_t lpol, lpou;
	int rc;

	if (payload_size == 0) {
		return -EINVAL;
	}

	if (offset & 3) {
		return -EINVAL;
	}

	numd = spdk_nvme_bytes_to_numd(payload_size);
	numdl = numd & 0xFFFFu;
	numdu = (numd >> 16) & 0xFFFFu;

	lpol = (uint32_t)offset;
	lpou = (uint32_t)(offset >> 32);

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	if (offset && !ctrlr->cdata.lpa.edlp) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size, cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd->nsid = nsid;
	cmd->cdw10 = cdw10;
	cmd->cdw10_bits.get_log_page.numdl = numdl;
	cmd->cdw10_bits.get_log_page.lid = log_page;

	cmd->cdw11 = cdw11;
	cmd->cdw11_bits.get_log_page.numdu = numdu;
	cmd->cdw12 = lpol;
	cmd->cdw13 = lpou;
	cmd->cdw14 = cdw14;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				 uint32_t nsid, void *payload, uint32_t payload_size,
				 uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return spdk_nvme_ctrlr_cmd_get_log_page_ext(ctrlr, log_page, nsid, payload,
			payload_size, offset, 0, 0, 0, cb_fn, cb_arg);
}
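
/*
 * Usage sketch (illustrative): reading the SMART / Health Information log
 * page for the whole controller.  Note that a non-zero offset requires
 * extended-data log page support (cdata.lpa.edlp), as checked above.
 * health_done is an assumed callback.
 *
 *	static struct spdk_nvme_health_information_page health_page;
 *
 *	spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION,
 *					 SPDK_NVME_GLOBAL_NS_TAG, &health_page,
 *					 sizeof(health_page), 0, health_done, NULL);
 *	spdk_nvme_ctrlr_process_admin_completions(ctrlr);	// poll until done
 */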

static void
nvme_ctrlr_retry_queued_abort(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_request	*next, *tmp;
	int rc;

	if (ctrlr->is_resetting || ctrlr->is_destructed || ctrlr->is_failed) {
		return;
	}

	STAILQ_FOREACH_SAFE(next, &ctrlr->queued_aborts, stailq, tmp) {
		STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
		ctrlr->outstanding_aborts++;
		rc = nvme_ctrlr_submit_admin_request(ctrlr, next);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to submit queued abort.\n");
			memset(&next->cpl, 0, sizeof(next->cpl));
			next->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
			next->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			next->cpl.status.dnr = 1;
			nvme_complete_request(next->cb_fn, next->cb_arg, next->qpair, next, &next->cpl);
			nvme_free_request(next);
		} else {
			/* Stop after the first successful submission. The remaining
			 * queued aborts are resubmitted one at a time as each
			 * outstanding abort completes.
			 */
			break;
		}
	}
}

static int
_nvme_ctrlr_submit_abort_request(struct spdk_nvme_ctrlr *ctrlr,
				 struct nvme_request *req)
{
	/* ACL is a 0's based value. */
	if (ctrlr->outstanding_aborts >= ctrlr->cdata.acl + 1U) {
		STAILQ_INSERT_TAIL(&ctrlr->queued_aborts, req, stailq);
		return 0;
	} else {
		ctrlr->outstanding_aborts++;
		return nvme_ctrlr_submit_admin_request(ctrlr, req);
	}
}

static void
nvme_ctrlr_cmd_abort_cpl(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_request	*req = ctx;
	struct spdk_nvme_ctrlr	*ctrlr;

	ctrlr = req->qpair->ctrlr;

	assert(ctrlr->outstanding_aborts > 0);
	ctrlr->outstanding_aborts--;
	nvme_ctrlr_retry_queued_abort(ctrlr);

	req->user_cb_fn(req->user_cb_arg, cpl);
}

int
spdk_nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			  uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	int rc;
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (qpair == NULL) {
		qpair = ctrlr->adminq;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_cmd_abort_cpl, NULL);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}
	req->cb_arg = req;
	req->user_cb_fn = cb_fn;
	req->user_cb_arg = cb_arg;

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_ABORT;
	cmd->cdw10_bits.abort.sqid = qpair->id;
	cmd->cdw10_bits.abort.cid = cid;

	rc = _nvme_ctrlr_submit_abort_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}
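
/*
 * Usage sketch (illustrative): aborting a timed-out command by CID from a
 * timeout callback registered with spdk_nvme_ctrlr_register_timeout_callback().
 *
 *	static void abort_done(void *arg, const struct spdk_nvme_cpl *cpl)
 *	{
 *		// CDW0 bit 0 clear means the command was successfully aborted
 *	}
 *
 *	static void timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr,
 *			       struct spdk_nvme_qpair *qpair, uint16_t cid)
 *	{
 *		spdk_nvme_ctrlr_cmd_abort(ctrlr, qpair, cid, abort_done, NULL);
 *	}
 */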

static void
nvme_complete_abort_request(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_request *req = ctx;
	struct nvme_request *parent = req->parent;
	struct spdk_nvme_ctrlr *ctrlr;

	ctrlr = req->qpair->ctrlr;

	assert(ctrlr->outstanding_aborts > 0);
	ctrlr->outstanding_aborts--;
	nvme_ctrlr_retry_queued_abort(ctrlr);

	nvme_request_remove_child(parent, req);

	if (!spdk_nvme_cpl_is_abort_success(cpl)) {
		parent->parent_status.cdw0 |= 1U;
	}

	if (parent->num_children == 0) {
		nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
				      parent, &parent->parent_status);
		nvme_free_request(parent);
	}
}

static int
nvme_request_add_abort(struct nvme_request *req, void *arg)
{
	struct nvme_request *parent = arg;
	struct nvme_request *child;
	void *cmd_cb_arg;

	cmd_cb_arg = parent->user_cb_arg;

	if (req->cb_arg != cmd_cb_arg &&
	    (req->parent == NULL || req->parent->cb_arg != cmd_cb_arg)) {
		return 0;
	}

	child = nvme_allocate_request_null(parent->qpair->ctrlr->adminq,
					   nvme_complete_abort_request, NULL);
	if (child == NULL) {
		return -ENOMEM;
	}

	child->cb_arg = child;

	child->cmd.opc = SPDK_NVME_OPC_ABORT;
	/* Copy SQID from the parent. */
	child->cmd.cdw10_bits.abort.sqid = parent->cmd.cdw10_bits.abort.sqid;
	child->cmd.cdw10_bits.abort.cid = req->cmd.cid;

	child->parent = parent;

	TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
	parent->num_children++;

	return 0;
}

int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	int rc = 0;
	struct nvme_request *parent, *child, *tmp;
	bool child_failed = false;
	int aborted = 0;

	if (cmd_cb_arg == NULL) {
		return -EINVAL;
	}

	pthread_mutex_lock(&ctrlr->ctrlr_lock);

	if (qpair == NULL) {
		qpair = ctrlr->adminq;
	}

	parent = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (parent == NULL) {
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);

		return -ENOMEM;
	}

	TAILQ_INIT(&parent->children);
	parent->num_children = 0;

	parent->cmd.opc = SPDK_NVME_OPC_ABORT;
	memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl));

	/* Hold the SQID that the requests to abort are associated with.
	 * This will be copied to the children.
	 *
	 * CID is not set here because the parent is not submitted directly
	 * and CID is not determined until a request to abort is found.
	 */
	parent->cmd.cdw10_bits.abort.sqid = qpair->id;

	/* This is used to find the requests to abort. */
	parent->user_cb_arg = cmd_cb_arg;

	/* Add an abort request for each outstanding request which has cmd_cb_arg
	 * as its callback context.
	 */
	rc = nvme_transport_qpair_iterate_requests(qpair, nvme_request_add_abort, parent);
	if (rc != 0) {
		/* Free abort requests already added. */
		child_failed = true;
	}

	TAILQ_FOREACH_SAFE(child, &parent->children, child_tailq, tmp) {
		if (spdk_likely(!child_failed)) {
			rc = _nvme_ctrlr_submit_abort_request(ctrlr, child);
			if (spdk_unlikely(rc != 0)) {
				child_failed = true;
			}
		} else {
			/* Free remaining abort requests. */
			nvme_request_remove_child(parent, child);
			nvme_free_request(child);
		}
	}

	if (spdk_likely(!child_failed)) {
		/* There is no error so far. Abort requests were submitted successfully
		 * or there was no outstanding request to abort.
		 *
		 * Hence, next, abort any queued requests that have cmd_cb_arg as
		 * their callback context.
		 */
		aborted = nvme_qpair_abort_queued_reqs_with_cbarg(qpair, cmd_cb_arg);
		if (parent->num_children == 0) {
			/* There was no outstanding request to abort. */
			if (aborted > 0) {
				/* The queued requests were successfully aborted. Hence
				 * complete the parent request with success synchronously.
				 */
				nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
						      parent, &parent->parent_status);
				nvme_free_request(parent);
			} else {
				/* There was no queued request to abort. */
				rc = -ENOENT;
			}
		}
	} else {
		/* Failed to add or submit an abort request. */
		if (parent->num_children != 0) {
			/* Return success since we must wait for those children
			 * to complete, but set the parent request to failure.
			 */
			parent->parent_status.cdw0 |= 1U;
			rc = 0;
		}
	}

	if (rc != 0) {
		nvme_free_request(parent);
	}

	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}
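
/*
 * Usage sketch (illustrative): aborting every outstanding and queued request
 * that was submitted with a given callback argument; my_io_ctx and abort_done
 * are assumptions of the example.
 *
 *	spdk_nvme_ctrlr_cmd_abort_ext(ctrlr, qpair, my_io_ctx, abort_done, NULL);
 *
 * A return of -ENOENT means nothing matching cmd_cb_arg was found; otherwise
 * abort_done() fires once all child Abort commands have completed, with CDW0
 * bit 0 set if any request could not be aborted.
 */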

int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
			 const struct spdk_nvme_fw_commit *fw_commit,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FIRMWARE_COMMIT;
	memcpy(&cmd->cdw10, fw_commit, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
				 uint32_t size, uint32_t offset, void *payload,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, size, cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD;
	cmd->cdw10 = spdk_nvme_bytes_to_numd(size);
	cmd->cdw11 = offset >> 2;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}
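
/*
 * Usage sketch (illustrative): applications normally drive these two helpers
 * through the public, synchronous spdk_nvme_ctrlr_update_firmware(), which
 * downloads the image in chunks and then issues the commit.  CDW10 (0's based
 * number of dwords) and CDW11 (offset in dwords) are dword-granular, so image
 * size and offset must be 4-byte aligned.
 *
 *	struct spdk_nvme_status status;
 *	int rc;
 *
 *	rc = spdk_nvme_ctrlr_update_firmware(ctrlr, fw_image, fw_image_size,
 *					     1,		// firmware slot
 *					     SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG,
 *					     &status);
 */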

int
spdk_nvme_ctrlr_cmd_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
				     uint16_t spsp, uint8_t nssf, void *payload,
				     uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SECURITY_RECEIVE;
	cmd->cdw10_bits.sec_send_recv.nssf = nssf;
	cmd->cdw10_bits.sec_send_recv.spsp0 = (uint8_t)spsp;
	cmd->cdw10_bits.sec_send_recv.spsp1 = (uint8_t)(spsp >> 8);
	cmd->cdw10_bits.sec_send_recv.secp = secp;
	cmd->cdw11 = payload_size;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
				  uint16_t spsp, uint8_t nssf, void *payload,
				  uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SECURITY_SEND;
	cmd->cdw10_bits.sec_send_recv.nssf = nssf;
	cmd->cdw10_bits.sec_send_recv.spsp0 = (uint8_t)spsp;
	cmd->cdw10_bits.sec_send_recv.spsp1 = (uint8_t)(spsp >> 8);
	cmd->cdw10_bits.sec_send_recv.secp = secp;
	cmd->cdw11 = payload_size;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}
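
/*
 * Usage sketch (illustrative): a TCG Opal Level 0 Discovery read via Security
 * Receive.  SECP 0x01 and SPSP 0x0001 are the values used by TCG security
 * protocols; the 2 KiB buffer size and security_done callback are example
 * assumptions.
 *
 *	uint8_t resp[2048];
 *
 *	spdk_nvme_ctrlr_cmd_security_receive(ctrlr,
 *					     0x01,	// SECP: TCG
 *					     0x0001,	// SPSP: Level 0 Discovery ComID
 *					     0,		// NSSF
 *					     resp, sizeof(resp),
 *					     security_done, NULL);
 */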

int
nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			struct spdk_nvme_sanitize *sanitize, uint32_t cdw11,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SANITIZE;
	cmd->nsid = nsid;
	cmd->cdw11 = cdw11;
	memcpy(&cmd->cdw10, sanitize, sizeof(cmd->cdw10));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

static int
nvme_ctrlr_cmd_directive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 uint32_t doper, uint32_t dtype, uint32_t dspec,
			 void *payload, uint32_t payload_size, uint32_t cdw12,
			 uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			 uint16_t opc_type, bool host_to_ctrlr)
{
	struct nvme_request *req = NULL;
	struct spdk_nvme_cmd *cmd = NULL;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, host_to_ctrlr);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}
	cmd = &req->cmd;
	cmd->opc = opc_type;
	cmd->nsid = nsid;

	if ((payload_size >> 2) > 0) {
		cmd->cdw10 = (payload_size >> 2) - 1;
	}
	cmd->cdw11_bits.directive.doper = doper;
	cmd->cdw11_bits.directive.dtype = dtype;
	cmd->cdw11_bits.directive.dspec = dspec;
	cmd->cdw12 = cdw12;
	cmd->cdw13 = cdw13;
	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_directive_send(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				   uint32_t doper, uint32_t dtype, uint32_t dspec,
				   void *payload, uint32_t payload_size, uint32_t cdw12,
				   uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_ctrlr_cmd_directive(ctrlr, nsid, doper, dtype, dspec,
					payload, payload_size, cdw12, cdw13, cb_fn, cb_arg,
					SPDK_NVME_OPC_DIRECTIVE_SEND, true);
}

int
spdk_nvme_ctrlr_cmd_directive_receive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				      uint32_t doper, uint32_t dtype, uint32_t dspec,
				      void *payload, uint32_t payload_size, uint32_t cdw12,
				      uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_ctrlr_cmd_directive(ctrlr, nsid, doper, dtype, dspec,
					payload, payload_size, cdw12, cdw13, cb_fn, cb_arg,
					SPDK_NVME_OPC_DIRECTIVE_RECEIVE, false);
}
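
/*
 * Usage sketch (illustrative): querying the Identify directive's parameters
 * with Directive Receive.  The directive type enum and the "return
 * parameters" operation value (0x01) are assumptions based on
 * spdk/nvme_spec.h; the 4 KiB buffer and directive_done callback are example
 * choices.
 *
 *	uint8_t params[4096];
 *
 *	spdk_nvme_ctrlr_cmd_directive_receive(ctrlr, 1,
 *					      0x01,	// DOPER: return parameters
 *					      SPDK_NVME_DIRECTIVE_TYPE_IDENTIFY,
 *					      0,	// DSPEC
 *					      params, sizeof(params),
 *					      0, 0, directive_done, NULL);
 */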