xref: /spdk/lib/nvme/nvme_ctrlr_cmd.c (revision a6dbe3721eb3b5990707fc3e378c95e505dd8ab5)
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2015 Intel Corporation. All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 */

#include "nvme_internal.h"
#include "spdk/nvme.h"

int
spdk_nvme_ctrlr_io_cmd_raw_no_payload_build(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cmd *cmd,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_payload payload;

	/* Submitting a command without building a payload is only supported on PCIe. */
	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
		return -EINVAL;
	}

	memset(&payload, 0, sizeof(payload));
	req = nvme_allocate_request(qpair, &payload, 0, 0, cb_fn, cb_arg);

	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}
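
/*
 * Usage sketch (illustrative, not part of this file): submit a raw Flush,
 * which carries no data payload, on a PCIe qpair. The namespace ID is a
 * hypothetical example value; non-PCIe transports get -EINVAL from the
 * helper above.
 */
static int
example_raw_flush(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
		  spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_FLUSH;
	cmd.nsid = 1;	/* hypothetical namespace ID */

	return spdk_nvme_ctrlr_io_cmd_raw_no_payload_build(ctrlr, qpair, &cmd,
							   cb_fn, cb_arg);
}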

int
spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
			   struct spdk_nvme_qpair *qpair,
			   struct spdk_nvme_cmd *cmd,
			   void *buf, uint32_t len,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request	*req;

	req = nvme_allocate_request_contig(qpair, buf, len, cb_fn, cb_arg);

	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ctrlr_cmd_io_raw_with_md(struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_nvme_qpair *qpair,
				   struct spdk_nvme_cmd *cmd,
				   void *buf, uint32_t len, void *md_buf,
				   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	uint32_t md_len = 0;

	payload = NVME_PAYLOAD_CONTIG(buf, md_buf);

	/* Calculate metadata length */
	if (md_buf) {
		struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, cmd->nsid);

		assert(ns != NULL);
		assert(ns->sector_size != 0);
		md_len = len / ns->sector_size * ns->md_size;
	}

	req = nvme_allocate_request(qpair, &payload, len, md_len, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd,
			      void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	int			rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_contig(ctrlr->adminq, buf, len, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}
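
/*
 * Usage sketch (illustrative, not part of this file): issue Identify
 * Controller through the raw admin path. The buffer must remain valid until
 * cb_fn runs and, because this path uses a contiguous payload, it should be
 * DMA-safe memory (e.g. from spdk_dma_zmalloc()) for PCIe transports.
 */
static int
example_identify_ctrlr_raw(struct spdk_nvme_ctrlr *ctrlr,
			   struct spdk_nvme_ctrlr_data *cdata,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_CTRLR;

	return spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, cdata, sizeof(*cdata),
					     cb_fn, cb_arg);
}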

int
nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
			uint8_t csi, void *payload, size_t payload_size,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size,
					      cb_fn, cb_arg, false);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_IDENTIFY;
	cmd->cdw10_bits.identify.cns = cns;
	cmd->cdw10_bits.identify.cntid = cntid;
	cmd->cdw11_bits.identify.csi = csi;
	cmd->nsid = nsid;

	return nvme_ctrlr_submit_admin_request(ctrlr, req);
}

int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
	cmd->nsid = nsid;
	cmd->cdw10_bits.ns_attach.sel = SPDK_NVME_NS_CTRLR_ATTACH;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
	cmd->nsid = nsid;
	cmd->cdw10_bits.ns_attach.sel = SPDK_NVME_NS_CTRLR_DETACH;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ns_data),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
	cmd->cdw10_bits.ns_manage.sel = SPDK_NVME_NS_MANAGEMENT_CREATE;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
			 void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
	cmd->cdw10_bits.ns_manage.sel = SPDK_NVME_NS_MANAGEMENT_DELETE;
	cmd->nsid = nsid;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

int
nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG;
	cmd->dptr.prp.prp1 = prp1;
	cmd->dptr.prp.prp2 = prp2;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FORMAT_NVM;
	cmd->nsid = nsid;
	memcpy(&cmd->cdw10, format, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd->cdw10_bits.set_features.fid = feature;
	cmd->cdw11 = cdw11;
	cmd->cdw12 = cdw12;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd->cdw10_bits.get_features.fid = feature;
	cmd->cdw11 = cdw11;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_get_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				   uint32_t cdw11, void *payload,
				   uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
				   void *cb_arg, uint32_t ns_id)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd->cdw10_bits.get_features.fid = feature;
	cmd->cdw11 = cdw11;
	cmd->nsid = ns_id;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_set_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				   uint32_t cdw11, uint32_t cdw12, void *payload,
				   uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
				   void *cb_arg, uint32_t ns_id)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd->cdw10_bits.set_features.fid = feature;
	cmd->cdw11 = cdw11;
	cmd->cdw12 = cdw12;
	cmd->nsid = ns_id;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	union spdk_nvme_feat_number_of_queues feat_num_queues;

	feat_num_queues.raw = 0;
	/* NSQR and NCQR are both 0's based values. */
	feat_num_queues.bits.nsqr = num_queues - 1;
	feat_num_queues.bits.ncqr = num_queues - 1;

	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, feat_num_queues.raw,
					       0, NULL, 0, cb_fn, cb_arg);
}

int
nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, 0, NULL, 0,
					       cb_fn, cb_arg);
}
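
/*
 * Completion-side sketch (illustrative, not part of this file): decode the
 * Get Features (Number of Queues) completion. NSQA/NCQA are returned in
 * cdw0 with the same layout as the feature union used above; both counts
 * are 0's based.
 */
static void
example_get_num_queues_done(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	union spdk_nvme_feat_number_of_queues nq;

	if (spdk_nvme_cpl_is_error(cpl)) {
		SPDK_ERRLOG("Get Features (Number of Queues) failed\n");
		return;
	}

	nq.raw = cpl->cdw0;
	SPDK_NOTICELOG("allocated %u submission and %u completion queues\n",
		       nq.bits.nsqr + 1u, nq.bits.ncqr + 1u);
}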

int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
				      union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
				      void *cb_arg)
{
	uint32_t cdw11;

	cdw11 = config.raw;
	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, 0,
					       NULL, 0, cb_fn, cb_arg);
}

int
nvme_ctrlr_cmd_set_host_id(struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	union spdk_nvme_feat_host_identifier feat_host_identifier;

	feat_host_identifier.raw = 0;
	if (host_id_size == 16) {
		/* 128-bit extended host identifier */
		feat_host_identifier.bits.exhid = 1;
	} else if (host_id_size == 8) {
		/* 64-bit host identifier */
		feat_host_identifier.bits.exhid = 0;
	} else {
		SPDK_ERRLOG("Invalid host ID size %u\n", host_id_size);
		return -EINVAL;
	}

	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_HOST_IDENTIFIER,
					       feat_host_identifier.raw, 0,
					       host_id, host_id_size, cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_get_log_page_ext(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				     uint32_t nsid, void *payload, uint32_t payload_size,
				     uint64_t offset, uint32_t cdw10,
				     uint32_t cdw11, uint32_t cdw14,
				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	uint32_t numd, numdl, numdu;
	uint32_t lpol, lpou;
	int rc;

	if (payload_size == 0) {
		return -EINVAL;
	}

	/* The log page offset must be dword aligned. */
	if (offset & 3) {
		return -EINVAL;
	}

	numd = spdk_nvme_bytes_to_numd(payload_size);
	numdl = numd & 0xFFFFu;
	numdu = (numd >> 16) & 0xFFFFu;

	lpol = (uint32_t)offset;
	lpou = (uint32_t)(offset >> 32);

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	/* A non-zero offset requires the controller to support extended data for Get Log Page. */
	if (offset && !ctrlr->cdata.lpa.edlp) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size, cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd->nsid = nsid;
	cmd->cdw10 = cdw10;
	cmd->cdw10_bits.get_log_page.numdl = numdl;
	cmd->cdw10_bits.get_log_page.lid = log_page;

	cmd->cdw11 = cdw11;
	cmd->cdw11_bits.get_log_page.numdu = numdu;
	cmd->cdw12 = lpol;
	cmd->cdw13 = lpou;
	cmd->cdw14 = cdw14;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				 uint32_t nsid, void *payload, uint32_t payload_size,
				 uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return spdk_nvme_ctrlr_cmd_get_log_page_ext(ctrlr, log_page, nsid, payload,
			payload_size, offset, 0, 0, 0, cb_fn, cb_arg);
}
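
/*
 * Usage sketch (illustrative, not part of this file): fetch the SMART /
 * Health Information log page for the whole controller. The buffer must
 * remain valid until cb_fn fires; SPDK_NVME_GLOBAL_NS_TAG selects
 * controller-wide rather than per-namespace data.
 */
static int
example_get_health_log(struct spdk_nvme_ctrlr *ctrlr,
		       struct spdk_nvme_health_information_page *health,
		       spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION,
						SPDK_NVME_GLOBAL_NS_TAG, health,
						sizeof(*health), 0, cb_fn, cb_arg);
}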

static void
nvme_ctrlr_retry_queued_abort(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_request	*next, *tmp;
	int rc;

	if (ctrlr->is_resetting || ctrlr->is_destructed || ctrlr->is_failed) {
		/* Don't resubmit aborts if ctrlr is failing */
		return;
	}

	if (spdk_nvme_ctrlr_get_admin_qp_failure_reason(ctrlr) != SPDK_NVME_QPAIR_FAILURE_NONE) {
		/* Don't resubmit aborts if the admin qpair is failed */
		return;
	}

	STAILQ_FOREACH_SAFE(next, &ctrlr->queued_aborts, stailq, tmp) {
		STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
		ctrlr->outstanding_aborts++;
		rc = nvme_ctrlr_submit_admin_request(ctrlr, next);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to submit queued abort.\n");
			memset(&next->cpl, 0, sizeof(next->cpl));
			next->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
			next->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			next->cpl.status.dnr = 1;
			nvme_complete_request(next->cb_fn, next->cb_arg, next->qpair, next, &next->cpl);
			nvme_free_request(next);
		} else {
			/* Stop after the first successful resubmission; the remaining
			 * queued aborts are retried as outstanding ones complete.
			 */
			break;
		}
	}
}

static int
_nvme_ctrlr_submit_abort_request(struct spdk_nvme_ctrlr *ctrlr,
				 struct nvme_request *req)
{
	/* ACL is a 0's based value. */
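	/* e.g. cdata.acl == 3 permits up to 4 concurrent Abort commands;
	 * anything beyond that is queued until an outstanding abort completes.
	 */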
	if (ctrlr->outstanding_aborts >= ctrlr->cdata.acl + 1U) {
		STAILQ_INSERT_TAIL(&ctrlr->queued_aborts, req, stailq);
		return 0;
	} else {
		ctrlr->outstanding_aborts++;
		return nvme_ctrlr_submit_admin_request(ctrlr, req);
	}
}

static void
nvme_ctrlr_cmd_abort_cpl(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_request	*req = ctx;
	struct spdk_nvme_ctrlr	*ctrlr;

	ctrlr = req->qpair->ctrlr;

	assert(ctrlr->outstanding_aborts > 0);
	ctrlr->outstanding_aborts--;
	nvme_ctrlr_retry_queued_abort(ctrlr);

	req->user_cb_fn(req->user_cb_arg, cpl);
}

int
spdk_nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			  uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	int rc;
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (qpair == NULL) {
		qpair = ctrlr->adminq;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_cmd_abort_cpl, NULL);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}
	req->cb_arg = req;
	req->user_cb_fn = cb_fn;
	req->user_cb_arg = cb_arg;

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_ABORT;
	cmd->cdw10_bits.abort.sqid = qpair->id;
	cmd->cdw10_bits.abort.cid = cid;

	rc = _nvme_ctrlr_submit_abort_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}
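
/*
 * Usage sketch (illustrative, not part of this file): abort one outstanding
 * command by its command identifier. The CID would normally be recorded when
 * the command was submitted. Passing qpair == NULL targets the admin queue.
 */
static int
example_abort_by_cid(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
		     uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return spdk_nvme_ctrlr_cmd_abort(ctrlr, qpair, cid, cb_fn, cb_arg);
}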

static void
nvme_complete_abort_request(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_request *req = ctx;
	struct nvme_request *parent = req->parent;
	struct spdk_nvme_ctrlr *ctrlr;

	ctrlr = req->qpair->ctrlr;

	assert(ctrlr->outstanding_aborts > 0);
	ctrlr->outstanding_aborts--;
	nvme_ctrlr_retry_queued_abort(ctrlr);

	nvme_request_remove_child(parent, req);

	if (!spdk_nvme_cpl_is_abort_success(cpl)) {
		parent->parent_status.cdw0 |= 1U;
	}

	if (parent->num_children == 0) {
		nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
				      parent, &parent->parent_status);
		nvme_free_request(parent);
	}
}

static int
nvme_request_add_abort(struct nvme_request *req, void *arg)
{
	struct nvme_request *parent = arg;
	struct nvme_request *child;
	void *cmd_cb_arg;

	cmd_cb_arg = parent->user_cb_arg;

	if (req->cb_arg != cmd_cb_arg &&
	    (req->parent == NULL || req->parent->cb_arg != cmd_cb_arg)) {
		return 0;
	}

	child = nvme_allocate_request_null(parent->qpair->ctrlr->adminq,
					   nvme_complete_abort_request, NULL);
	if (child == NULL) {
		return -ENOMEM;
	}

	child->cb_arg = child;

	child->cmd.opc = SPDK_NVME_OPC_ABORT;
	/* Copy the SQID from the parent. */
	child->cmd.cdw10_bits.abort.sqid = parent->cmd.cdw10_bits.abort.sqid;
	child->cmd.cdw10_bits.abort.cid = req->cmd.cid;

	child->parent = parent;

	TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
	parent->num_children++;

	return 0;
}

int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	int rc = 0;
	struct nvme_request *parent, *child, *tmp;
	bool child_failed = false;
	int aborted = 0;

	if (cmd_cb_arg == NULL) {
		return -EINVAL;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	if (qpair == NULL) {
		qpair = ctrlr->adminq;
	}

	parent = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (parent == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

		return -ENOMEM;
	}

	TAILQ_INIT(&parent->children);
	parent->num_children = 0;

	parent->cmd.opc = SPDK_NVME_OPC_ABORT;
	memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl));

	/* Hold the SQID that the requests to abort are associated with.
	 * This will be copied to the children.
	 *
	 * CID is not set here because the parent is not submitted directly
	 * and CID is not determined until a request to abort is found.
	 */
	parent->cmd.cdw10_bits.abort.sqid = qpair->id;

	/* This is used to find the requests to abort. */
	parent->user_cb_arg = cmd_cb_arg;

	/* Add an abort request for each outstanding request which has cmd_cb_arg
	 * as its callback context.
	 */
	rc = nvme_transport_qpair_iterate_requests(qpair, nvme_request_add_abort, parent);
	if (rc != 0) {
		/* Free the abort requests that were already added. */
		child_failed = true;
	}

	TAILQ_FOREACH_SAFE(child, &parent->children, child_tailq, tmp) {
		if (spdk_likely(!child_failed)) {
			rc = _nvme_ctrlr_submit_abort_request(ctrlr, child);
			if (spdk_unlikely(rc != 0)) {
				child_failed = true;
			}
		} else {
			/* Free the remaining abort requests. */
			nvme_request_remove_child(parent, child);
			nvme_free_request(child);
		}
	}

	if (spdk_likely(!child_failed)) {
		/* There is no error so far. Abort requests were submitted successfully
		 * or there was no outstanding request to abort.
		 *
		 * Hence abort the queued requests which have cmd_cb_arg as their
		 * callback context next.
		 */
		aborted = nvme_qpair_abort_queued_reqs_with_cbarg(qpair, cmd_cb_arg);
		if (parent->num_children == 0) {
			/* There was no outstanding request to abort. */
			if (aborted > 0) {
				/* The queued requests were successfully aborted. Hence
				 * complete the parent request with success synchronously.
				 */
				nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
						      parent, &parent->parent_status);
				nvme_free_request(parent);
			} else {
				/* There was no queued request to abort either. */
				rc = -ENOENT;
			}
		}
	} else {
		/* Failed to add or submit an abort request. */
		if (parent->num_children != 0) {
			/* Return success since we must wait for those children
			 * to complete, but set the parent request to failure.
			 */
			parent->parent_status.cdw0 |= 1U;
			rc = 0;
		}
	}

	if (rc != 0) {
		nvme_free_request(parent);
	}

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}
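
/*
 * Usage sketch (illustrative, not part of this file): cancel every command,
 * outstanding or still queued, that was submitted with a given callback
 * argument. Unlike spdk_nvme_ctrlr_cmd_abort(), no CID is needed; -ENOENT
 * means nothing matching io_cb_arg was found. In the completion, cdw0 bit 0
 * set indicates that at least one matching command could not be aborted.
 */
static int
example_abort_by_cb_arg(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			void *io_cb_arg, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return spdk_nvme_ctrlr_cmd_abort_ext(ctrlr, qpair, io_cb_arg, cb_fn, cb_arg);
}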

int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
			 const struct spdk_nvme_fw_commit *fw_commit,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FIRMWARE_COMMIT;
	memcpy(&cmd->cdw10, fw_commit, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
				 uint32_t size, uint32_t offset, void *payload,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, size, cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD;
	/* NUMD is a 0's based dword count; OFST is expressed in dwords. */
	cmd->cdw10 = spdk_nvme_bytes_to_numd(size);
	cmd->cdw11 = offset >> 2;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}
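
/*
 * Usage sketch (illustrative, not part of this file): submit one chunk of a
 * firmware image. Because NUMD/OFST are dword-based, both size and offset
 * must be 4-byte aligned; a real caller (e.g. spdk_nvme_ctrlr_update_firmware())
 * loops chunk by chunk, waiting for each completion and honoring the
 * controller's transfer-size and FWUG constraints.
 */
static int
example_fw_download_chunk(struct spdk_nvme_ctrlr *ctrlr, void *image,
			  uint32_t image_size, uint32_t offset, uint32_t max_chunk,
			  spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	uint32_t size = spdk_min(max_chunk, image_size - offset);

	/* Both values must be dword (4-byte) aligned. */
	assert((size & 3) == 0 && (offset & 3) == 0);

	return nvme_ctrlr_cmd_fw_image_download(ctrlr, size, offset,
						(uint8_t *)image + offset,
						cb_fn, cb_arg);
}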

int
spdk_nvme_ctrlr_cmd_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
				     uint16_t spsp, uint8_t nssf, void *payload,
				     uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SECURITY_RECEIVE;
	cmd->cdw10_bits.sec_send_recv.nssf = nssf;
	cmd->cdw10_bits.sec_send_recv.spsp0 = (uint8_t)spsp;
	cmd->cdw10_bits.sec_send_recv.spsp1 = (uint8_t)(spsp >> 8);
	cmd->cdw10_bits.sec_send_recv.secp = secp;
	cmd->cdw11 = payload_size;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
				  uint16_t spsp, uint8_t nssf, void *payload,
				  uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SECURITY_SEND;
	cmd->cdw10_bits.sec_send_recv.nssf = nssf;
	cmd->cdw10_bits.sec_send_recv.spsp0 = (uint8_t)spsp;
	cmd->cdw10_bits.sec_send_recv.spsp1 = (uint8_t)(spsp >> 8);
	cmd->cdw10_bits.sec_send_recv.secp = secp;
	cmd->cdw11 = payload_size;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			struct spdk_nvme_sanitize *sanitize, uint32_t cdw11,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SANITIZE;
	cmd->nsid = nsid;
	cmd->cdw11 = cdw11;
	memcpy(&cmd->cdw10, sanitize, sizeof(cmd->cdw10));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

static int
nvme_ctrlr_cmd_directive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 uint32_t doper, uint32_t dtype, uint32_t dspec,
			 void *payload, uint32_t payload_size, uint32_t cdw12,
			 uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			 uint16_t opc_type, bool host_to_ctrlr)
{
	struct nvme_request *req = NULL;
	struct spdk_nvme_cmd *cmd = NULL;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, host_to_ctrlr);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}
	cmd = &req->cmd;
	cmd->opc = opc_type;
	cmd->nsid = nsid;

	/* The number of dwords (cdw10) is a 0's based value. */
	if ((payload_size >> 2) > 0) {
		cmd->cdw10 = (payload_size >> 2) - 1;
	}
	cmd->cdw11_bits.directive.doper = doper;
	cmd->cdw11_bits.directive.dtype = dtype;
	cmd->cdw11_bits.directive.dspec = dspec;
	cmd->cdw12 = cdw12;
	cmd->cdw13 = cdw13;
	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_directive_send(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				   uint32_t doper, uint32_t dtype, uint32_t dspec,
				   void *payload, uint32_t payload_size, uint32_t cdw12,
				   uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_ctrlr_cmd_directive(ctrlr, nsid, doper, dtype, dspec,
					payload, payload_size, cdw12, cdw13, cb_fn, cb_arg,
					SPDK_NVME_OPC_DIRECTIVE_SEND, true);
}

int
spdk_nvme_ctrlr_cmd_directive_receive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				      uint32_t doper, uint32_t dtype, uint32_t dspec,
				      void *payload, uint32_t payload_size, uint32_t cdw12,
				      uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_ctrlr_cmd_directive(ctrlr, nsid, doper, dtype, dspec,
					payload, payload_size, cdw12, cdw13, cb_fn, cb_arg,
					SPDK_NVME_OPC_DIRECTIVE_RECEIVE, false);
}
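
/*
 * Usage sketch (illustrative, not part of this file): read the Identify
 * directive's Return Parameters page for a namespace. Directive type 0x00
 * (Identify) and operation 0x01 (Return Parameters) are the spec-defined
 * values, and the parameter page is 4096 bytes.
 */
static int
example_directive_return_params(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				void *buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return spdk_nvme_ctrlr_cmd_directive_receive(ctrlr, nsid,
			1 /* doper: return parameters */,
			0 /* dtype: identify directive */,
			0 /* dspec */, buf, 4096, 0, 0, cb_fn, cb_arg);
}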