xref: /spdk/lib/nvme/nvme_ctrlr_cmd.c (revision 441431d22872ae4e05a1bf8b78e9aeff1eba1eb3)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation. All rights reserved.
5  *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "nvme_internal.h"
35 
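/*
 * Submit an arbitrary I/O command without building a data payload; the
 * caller's spdk_nvme_cmd is copied verbatim into the request, data pointer
 * fields included. Only the PCIe transport is supported; other transports
 * get -EINVAL because they must build their own data descriptors.
 */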
36 int
37 spdk_nvme_ctrlr_io_cmd_raw_no_payload_build(struct spdk_nvme_ctrlr *ctrlr,
38 		struct spdk_nvme_qpair *qpair,
39 		struct spdk_nvme_cmd *cmd,
40 		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
41 {
42 	struct nvme_request *req;
43 	struct nvme_payload payload;
44 
45 	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
46 		return -EINVAL;
47 	}
48 
49 	memset(&payload, 0, sizeof(payload));
50 	req = nvme_allocate_request(qpair, &payload, 0, 0, cb_fn, cb_arg);
51 
52 	if (req == NULL) {
53 		return -ENOMEM;
54 	}
55 
56 	memcpy(&req->cmd, cmd, sizeof(req->cmd));
57 
58 	return nvme_qpair_submit_request(qpair, req);
59 }
60 
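/*
 * Submit an arbitrary I/O command with a contiguous data buffer of len
 * bytes. The caller's spdk_nvme_cmd is copied verbatim into the request;
 * completion is reported through cb_fn when the qpair is polled.
 */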
61 int
62 spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
63 			   struct spdk_nvme_qpair *qpair,
64 			   struct spdk_nvme_cmd *cmd,
65 			   void *buf, uint32_t len,
66 			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
67 {
68 	struct nvme_request	*req;
69 
70 	req = nvme_allocate_request_contig(qpair, buf, len, cb_fn, cb_arg);
71 
72 	if (req == NULL) {
73 		return -ENOMEM;
74 	}
75 
76 	memcpy(&req->cmd, cmd, sizeof(req->cmd));
77 
78 	return nvme_qpair_submit_request(qpair, req);
79 }
80 
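/*
 * Same as spdk_nvme_ctrlr_cmd_io_raw(), but with a separate metadata buffer.
 * The metadata length is derived from the namespace's sector and metadata
 * sizes, so len is expected to be a whole number of logical blocks.
 */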
81 int
82 spdk_nvme_ctrlr_cmd_io_raw_with_md(struct spdk_nvme_ctrlr *ctrlr,
83 				   struct spdk_nvme_qpair *qpair,
84 				   struct spdk_nvme_cmd *cmd,
85 				   void *buf, uint32_t len, void *md_buf,
86 				   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
87 {
88 	struct nvme_request *req;
89 	struct nvme_payload payload;
90 	uint32_t md_len = 0;
91 
92 	payload = NVME_PAYLOAD_CONTIG(buf, md_buf);
93 
94 	/* Calculate metadata length */
95 	if (md_buf) {
96 		struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, cmd->nsid);
97 
98 		assert(ns != NULL);
99 		assert(ns->sector_size != 0);
100 		md_len = len / ns->sector_size * ns->md_size;
101 	}
102 
103 	req = nvme_allocate_request(qpair, &payload, len, md_len, cb_fn, cb_arg);
104 	if (req == NULL) {
105 		return -ENOMEM;
106 	}
107 
108 	memcpy(&req->cmd, cmd, sizeof(req->cmd));
109 
110 	return nvme_qpair_submit_request(qpair, req);
111 }
112 
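/*
 * Submit an arbitrary admin command with an optional contiguous data buffer.
 * The request is placed on the admin queue under ctrlr_lock; completion is
 * reported via cb_fn when spdk_nvme_ctrlr_process_admin_completions() is
 * called.
 *
 * A minimal usage sketch (identify_done and idata are hypothetical names,
 * not defined in this file):
 *
 *   struct spdk_nvme_cmd cmd = {0};
 *
 *   cmd.opc = SPDK_NVME_OPC_IDENTIFY;
 *   cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_CTRLR;
 *   rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, idata, sizeof(*idata),
 *                                      identify_done, NULL);
 */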
113 int
114 spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
115 			      struct spdk_nvme_cmd *cmd,
116 			      void *buf, uint32_t len,
117 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
118 {
119 	struct nvme_request	*req;
120 	int			rc;
121 
122 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
123 	req = nvme_allocate_request_contig(ctrlr->adminq, buf, len, cb_fn, cb_arg);
124 	if (req == NULL) {
125 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
126 		return -ENOMEM;
127 	}
128 
129 	memcpy(&req->cmd, cmd, sizeof(req->cmd));
130 
131 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
132 
133 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
134 	return rc;
135 }
136 
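/*
 * Build and submit an Identify admin command. CNS, CNTID, NSID and CSI
 * select which data structure the controller returns; the result is copied
 * back into the caller's payload buffer on completion.
 */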
137 int
138 nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
139 			uint8_t csi, void *payload, size_t payload_size,
140 			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
141 {
142 	struct nvme_request *req;
143 	struct spdk_nvme_cmd *cmd;
144 
145 	req = nvme_allocate_request_user_copy(ctrlr->adminq,
146 					      payload, payload_size,
147 					      cb_fn, cb_arg, false);
148 	if (req == NULL) {
149 		return -ENOMEM;
150 	}
151 
152 	cmd = &req->cmd;
153 	cmd->opc = SPDK_NVME_OPC_IDENTIFY;
154 	cmd->cdw10_bits.identify.cns = cns;
155 	cmd->cdw10_bits.identify.cntid = cntid;
156 	cmd->cdw11_bits.identify.csi = csi;
157 	cmd->nsid = nsid;
158 
159 	return nvme_ctrlr_submit_admin_request(ctrlr, req);
160 }
161 
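/*
 * Submit a Namespace Attachment admin command with SEL = attach. The payload
 * is the list of controller IDs to attach the namespace to; detach (below)
 * differs only in the SEL value.
 */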
162 int
163 nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
164 			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
165 {
166 	struct nvme_request			*req;
167 	struct spdk_nvme_cmd			*cmd;
168 	int					rc;
169 
170 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
171 	req = nvme_allocate_request_user_copy(ctrlr->adminq,
172 					      payload, sizeof(struct spdk_nvme_ctrlr_list),
173 					      cb_fn, cb_arg, true);
174 	if (req == NULL) {
175 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
176 		return -ENOMEM;
177 	}
178 
179 	cmd = &req->cmd;
180 	cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
181 	cmd->nsid = nsid;
182 	cmd->cdw10_bits.ns_attach.sel = SPDK_NVME_NS_CTRLR_ATTACH;
183 
184 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
185 
186 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
187 	return rc;
188 }
189 
190 int
191 nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
192 			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
193 {
194 	struct nvme_request			*req;
195 	struct spdk_nvme_cmd			*cmd;
196 	int					rc;
197 
198 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
199 	req = nvme_allocate_request_user_copy(ctrlr->adminq,
200 					      payload, sizeof(struct spdk_nvme_ctrlr_list),
201 					      cb_fn, cb_arg, true);
202 	if (req == NULL) {
203 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
204 		return -ENOMEM;
205 	}
206 
207 	cmd = &req->cmd;
208 	cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
209 	cmd->nsid = nsid;
210 	cmd->cdw10_bits.ns_attach.sel = SPDK_NVME_NS_CTRLR_DETACH;
211 
212 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
213 
214 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
215 	return rc;
216 }
217 
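/*
 * Submit a Namespace Management (create) admin command. The payload is the
 * spdk_nvme_ns_data describing the namespace to create; the new namespace ID
 * is returned in cdw0 of the completion.
 */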
218 int
219 nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
220 			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
221 {
222 	struct nvme_request			*req;
223 	struct spdk_nvme_cmd			*cmd;
224 	int					rc;
225 
226 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
227 	req = nvme_allocate_request_user_copy(ctrlr->adminq,
228 					      payload, sizeof(struct spdk_nvme_ns_data),
229 					      cb_fn, cb_arg, true);
230 	if (req == NULL) {
231 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
232 		return -ENOMEM;
233 	}
234 
235 	cmd = &req->cmd;
236 	cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
237 	cmd->cdw10_bits.ns_manage.sel = SPDK_NVME_NS_MANAGEMENT_CREATE;
238 
239 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
240 
241 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
242 	return rc;
243 }
244 
245 int
246 nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
247 			 void *cb_arg)
248 {
249 	struct nvme_request			*req;
250 	struct spdk_nvme_cmd			*cmd;
251 	int					rc;
252 
253 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
254 	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
255 	if (req == NULL) {
256 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
257 		return -ENOMEM;
258 	}
259 
260 	cmd = &req->cmd;
261 	cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
262 	cmd->cdw10_bits.ns_manage.sel = SPDK_NVME_NS_MANAGEMENT_DELETE;
263 	cmd->nsid = nsid;
264 
265 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
266 
267 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
268 	return rc;
269 }
270 
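/*
 * Submit a Doorbell Buffer Config admin command. prp1 is the address of the
 * shadow doorbell buffer and prp2 the address of the EventIdx buffer, as
 * defined by the NVMe specification for this command.
 */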
271 int
272 nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
273 				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
274 {
275 	struct nvme_request			*req;
276 	struct spdk_nvme_cmd			*cmd;
277 	int					rc;
278 
279 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
280 	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
281 	if (req == NULL) {
282 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
283 		return -ENOMEM;
284 	}
285 
286 	cmd = &req->cmd;
287 	cmd->opc = SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG;
288 	cmd->dptr.prp.prp1 = prp1;
289 	cmd->dptr.prp.prp2 = prp2;
290 
291 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
292 
293 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
294 	return rc;
295 }
296 
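/*
 * Submit a Format NVM admin command. The caller's spdk_nvme_format settings
 * (LBA format, metadata, protection information, secure erase) are copied
 * into CDW10.
 */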
297 int
298 nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
299 		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
300 {
301 	struct nvme_request *req;
302 	struct spdk_nvme_cmd *cmd;
303 	int rc;
304 
305 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
306 	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
307 	if (req == NULL) {
308 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
309 		return -ENOMEM;
310 	}
311 
312 	cmd = &req->cmd;
313 	cmd->opc = SPDK_NVME_OPC_FORMAT_NVM;
314 	cmd->nsid = nsid;
315 	memcpy(&cmd->cdw10, format, sizeof(uint32_t));
316 
317 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
318 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
319 
320 	return rc;
321 }
322 
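/*
 * Submit a Set Features admin command. The feature ID goes into CDW10 and
 * the caller supplies CDW11/CDW12 and an optional data payload, which is
 * copied into a request-owned buffer before submission.
 */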
323 int
324 spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
325 				uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
326 				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
327 {
328 	struct nvme_request *req;
329 	struct spdk_nvme_cmd *cmd;
330 	int rc;
331 
332 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
333 	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
334 					      true);
335 	if (req == NULL) {
336 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
337 		return -ENOMEM;
338 	}
339 
340 	cmd = &req->cmd;
341 	cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
342 	cmd->cdw10_bits.set_features.fid = feature;
343 	cmd->cdw11 = cdw11;
344 	cmd->cdw12 = cdw12;
345 
346 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
347 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
348 
349 	return rc;
350 }
351 
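/*
 * Submit a Get Features admin command. The feature ID goes into CDW10; any
 * returned data is copied back into the caller's payload buffer. The *_ns
 * variants below additionally set the namespace ID.
 */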
352 int
353 spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
354 				uint32_t cdw11, void *payload, uint32_t payload_size,
355 				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
356 {
357 	struct nvme_request *req;
358 	struct spdk_nvme_cmd *cmd;
359 	int rc;
360 
361 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
362 	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
363 					      false);
364 	if (req == NULL) {
365 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
366 		return -ENOMEM;
367 	}
368 
369 	cmd = &req->cmd;
370 	cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
371 	cmd->cdw10_bits.get_features.fid = feature;
372 	cmd->cdw11 = cdw11;
373 
374 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
375 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
376 
377 	return rc;
378 }
379 
380 int
381 spdk_nvme_ctrlr_cmd_get_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
382 				   uint32_t cdw11, void *payload,
383 				   uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
384 				   void *cb_arg, uint32_t ns_id)
385 {
386 	struct nvme_request *req;
387 	struct spdk_nvme_cmd *cmd;
388 	int rc;
389 
390 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
391 	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
392 					      false);
393 	if (req == NULL) {
394 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
395 		return -ENOMEM;
396 	}
397 
398 	cmd = &req->cmd;
399 	cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
400 	cmd->cdw10_bits.get_features.fid = feature;
401 	cmd->cdw11 = cdw11;
402 	cmd->nsid = ns_id;
403 
404 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
405 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
406 
407 	return rc;
408 }
409 
410 int spdk_nvme_ctrlr_cmd_set_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
411 				       uint32_t cdw11, uint32_t cdw12, void *payload,
412 				       uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
413 				       void *cb_arg, uint32_t ns_id)
414 {
415 	struct nvme_request *req;
416 	struct spdk_nvme_cmd *cmd;
417 	int rc;
418 
419 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
420 	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
421 					      true);
422 	if (req == NULL) {
423 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
424 		return -ENOMEM;
425 	}
426 
427 	cmd = &req->cmd;
428 	cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
429 	cmd->cdw10_bits.set_features.fid = feature;
430 	cmd->cdw11 = cdw11;
431 	cmd->cdw12 = cdw12;
432 	cmd->nsid = ns_id;
433 
434 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
435 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
436 
437 	return rc;
438 }
439 
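/*
 * Set the Number of Queues feature. NSQR and NCQR are 0's based values in
 * the NVMe specification, hence the num_queues - 1 conversion; the number of
 * queues actually allocated is reported in cdw0 of the completion.
 */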
440 int
441 nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
442 			      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
443 {
444 	union spdk_nvme_feat_number_of_queues feat_num_queues;
445 
446 	feat_num_queues.raw = 0;
447 	feat_num_queues.bits.nsqr = num_queues - 1;
448 	feat_num_queues.bits.ncqr = num_queues - 1;
449 
450 	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, feat_num_queues.raw,
451 					       0,
452 					       NULL, 0, cb_fn, cb_arg);
453 }
454 
455 int
456 nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
457 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
458 {
459 	return spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, 0, NULL, 0,
460 					       cb_fn, cb_arg);
461 }
462 
463 int
464 nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
465 				      union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
466 				      void *cb_arg)
467 {
468 	uint32_t cdw11;
469 
470 	cdw11 = config.raw;
471 	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, 0,
472 					       NULL, 0,
473 					       cb_fn, cb_arg);
474 }
475 
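/*
 * Set the Host Identifier feature. Only 8-byte (64-bit) and 16-byte
 * (128-bit extended) host identifiers are accepted; EXHID in CDW11 selects
 * between the two formats.
 */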
476 int
477 nvme_ctrlr_cmd_set_host_id(struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
478 			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
479 {
480 	union spdk_nvme_feat_host_identifier feat_host_identifier;
481 
482 	feat_host_identifier.raw = 0;
483 	if (host_id_size == 16) {
484 		/* 128-bit extended host identifier */
485 		feat_host_identifier.bits.exhid = 1;
486 	} else if (host_id_size == 8) {
487 		/* 64-bit host identifier */
488 		feat_host_identifier.bits.exhid = 0;
489 	} else {
490 		SPDK_ERRLOG("Invalid host ID size %u\n", host_id_size);
491 		return -EINVAL;
492 	}
493 
494 	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_HOST_IDENTIFIER,
495 					       feat_host_identifier.raw, 0,
496 					       host_id, host_id_size, cb_fn, cb_arg);
497 }
498 
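/*
 * Submit a Get Log Page admin command. The 0's based dword count (NUMD) is
 * split across NUMDL/NUMDU, and the byte offset into the log page goes into
 * LPOL/LPOU (CDW12/CDW13). Offsets must be dword aligned and require the
 * controller to support extended log page data (LPA.EDLP).
 */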
499 int
500 spdk_nvme_ctrlr_cmd_get_log_page_ext(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
501 				     uint32_t nsid, void *payload, uint32_t payload_size,
502 				     uint64_t offset, uint32_t cdw10,
503 				     uint32_t cdw11, uint32_t cdw14,
504 				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
505 {
506 	struct nvme_request *req;
507 	struct spdk_nvme_cmd *cmd;
508 	uint32_t numd, numdl, numdu;
509 	uint32_t lpol, lpou;
510 	int rc;
511 
512 	if (payload_size == 0) {
513 		return -EINVAL;
514 	}
515 
516 	if (offset & 3) {
517 		return -EINVAL;
518 	}
519 
520 	numd = spdk_nvme_bytes_to_numd(payload_size);
521 	numdl = numd & 0xFFFFu;
522 	numdu = (numd >> 16) & 0xFFFFu;
523 
524 	lpol = (uint32_t)offset;
525 	lpou = (uint32_t)(offset >> 32);
526 
527 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
528 
529 	if (offset && !ctrlr->cdata.lpa.edlp) {
530 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
531 		return -EINVAL;
532 	}
533 
534 	req = nvme_allocate_request_user_copy(ctrlr->adminq,
535 					      payload, payload_size, cb_fn, cb_arg, false);
536 	if (req == NULL) {
537 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
538 		return -ENOMEM;
539 	}
540 
541 	cmd = &req->cmd;
542 	cmd->opc = SPDK_NVME_OPC_GET_LOG_PAGE;
543 	cmd->nsid = nsid;
544 	cmd->cdw10 = cdw10;
545 	cmd->cdw10_bits.get_log_page.numdl = numdl;
546 	cmd->cdw10_bits.get_log_page.lid = log_page;
547 
548 	cmd->cdw11 = cdw11;
549 	cmd->cdw11_bits.get_log_page.numdu = numdu;
550 	cmd->cdw12 = lpol;
551 	cmd->cdw13 = lpou;
552 	cmd->cdw14 = cdw14;
553 
554 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
555 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
556 
557 	return rc;
558 }
559 
560 int
561 spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
562 				 uint32_t nsid, void *payload, uint32_t payload_size,
563 				 uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
564 {
565 	return spdk_nvme_ctrlr_cmd_get_log_page_ext(ctrlr, log_page, nsid, payload,
566 			payload_size, offset, 0, 0, 0, cb_fn, cb_arg);
567 }
568 
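/*
 * Resubmit Abort commands that were queued because the controller's Abort
 * Command Limit had been reached. Aborts that fail to submit are completed
 * with an Internal Device Error status and DNR set.
 */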
569 static void
570 nvme_ctrlr_retry_queued_abort(struct spdk_nvme_ctrlr *ctrlr)
571 {
572 	struct nvme_request	*next, *tmp;
573 	int rc;
574 
575 	if (ctrlr->is_resetting || ctrlr->is_destructed) {
576 		return;
577 	}
578 
579 	STAILQ_FOREACH_SAFE(next, &ctrlr->queued_aborts, stailq, tmp) {
580 		STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
581 		ctrlr->outstanding_aborts++;
582 		rc = nvme_ctrlr_submit_admin_request(ctrlr, next);
583 		if (rc < 0) {
584 			SPDK_ERRLOG("Failed to submit queued abort.\n");
585 			memset(&next->cpl, 0, sizeof(next->cpl));
586 			next->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
587 			next->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
588 			next->cpl.status.dnr = 1;
589 			nvme_complete_request(next->cb_fn, next->cb_arg, next->qpair, next, &next->cpl);
590 			nvme_free_request(next);
591 		} else {
592 			/* Stop after one successful resubmission; the rest are retried when it completes. */
593 			break;
594 		}
595 	}
596 }
597 
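/*
 * Submit an Abort command immediately if we are below the controller's Abort
 * Command Limit, otherwise queue it for later resubmission.
 */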
598 static int
599 _nvme_ctrlr_submit_abort_request(struct spdk_nvme_ctrlr *ctrlr,
600 				 struct nvme_request *req)
601 {
602 	/* ACL (Abort Command Limit) is a 0's based value. */
603 	if (ctrlr->outstanding_aborts >= ctrlr->cdata.acl + 1U) {
604 		STAILQ_INSERT_TAIL(&ctrlr->queued_aborts, req, stailq);
605 		return 0;
606 	} else {
607 		ctrlr->outstanding_aborts++;
608 		return nvme_ctrlr_submit_admin_request(ctrlr, req);
609 	}
610 }
611 
612 static void
613 nvme_ctrlr_cmd_abort_cpl(void *ctx, const struct spdk_nvme_cpl *cpl)
614 {
615 	struct nvme_request	*req = ctx;
616 	struct spdk_nvme_ctrlr	*ctrlr;
617 
618 	ctrlr = req->qpair->ctrlr;
619 
620 	ctrlr->outstanding_aborts--;
621 	nvme_ctrlr_retry_queued_abort(ctrlr);
622 
623 	req->user_cb_fn(req->user_cb_arg, cpl);
624 }
625 
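/*
 * Abort a specific command, identified by its CID on the given qpair (the
 * admin queue if qpair is NULL). The qpair's SQID and the CID are placed in
 * CDW10 of the Abort command.
 */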
626 int
627 spdk_nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
628 			  uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
629 {
630 	int rc;
631 	struct nvme_request *req;
632 	struct spdk_nvme_cmd *cmd;
633 
634 	if (qpair == NULL) {
635 		qpair = ctrlr->adminq;
636 	}
637 
638 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
639 	req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_cmd_abort_cpl, NULL);
640 	if (req == NULL) {
641 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
642 		return -ENOMEM;
643 	}
644 	req->cb_arg = req;
645 	req->user_cb_fn = cb_fn;
646 	req->user_cb_arg = cb_arg;
647 
648 	cmd = &req->cmd;
649 	cmd->opc = SPDK_NVME_OPC_ABORT;
650 	cmd->cdw10_bits.abort.sqid = qpair->id;
651 	cmd->cdw10_bits.abort.cid = cid;
652 
653 	rc = _nvme_ctrlr_submit_abort_request(ctrlr, req);
654 
655 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
656 	return rc;
657 }
658 
659 static void
660 nvme_complete_abort_request(void *ctx, const struct spdk_nvme_cpl *cpl)
661 {
662 	struct nvme_request *req = ctx;
663 	struct nvme_request *parent = req->parent;
664 	struct spdk_nvme_ctrlr *ctrlr;
665 
666 	ctrlr = req->qpair->ctrlr;
667 
668 	ctrlr->outstanding_aborts--;
669 	nvme_ctrlr_retry_queued_abort(ctrlr);
670 
671 	nvme_request_remove_child(parent, req);
672 
673 	if (!spdk_nvme_cpl_is_abort_success(cpl)) {
674 		parent->parent_status.cdw0 |= 1U;
675 	}
676 
677 	if (parent->num_children == 0) {
678 		nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
679 				      parent, &parent->parent_status);
680 		nvme_free_request(parent);
681 	}
682 }
683 
684 static int
685 nvme_request_add_abort(struct nvme_request *req, void *arg)
686 {
687 	struct nvme_request *parent = arg;
688 	struct nvme_request *child;
689 	void *cmd_cb_arg;
690 
691 	cmd_cb_arg = parent->user_cb_arg;
692 
693 	if (req->cb_arg != cmd_cb_arg &&
694 	    (req->parent == NULL || req->parent->cb_arg != cmd_cb_arg)) {
695 		return 0;
696 	}
697 
698 	child = nvme_allocate_request_null(parent->qpair->ctrlr->adminq,
699 					   nvme_complete_abort_request, NULL);
700 	if (child == NULL) {
701 		return -ENOMEM;
702 	}
703 
704 	child->cb_arg = child;
705 
706 	child->cmd.opc = SPDK_NVME_OPC_ABORT;
707 	/* Copy SQID from the parent. */
708 	child->cmd.cdw10_bits.abort.sqid = parent->cmd.cdw10_bits.abort.sqid;
709 	child->cmd.cdw10_bits.abort.cid = req->cmd.cid;
710 
711 	child->parent = parent;
712 
713 	TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
714 	parent->num_children++;
715 
716 	return 0;
717 }
718 
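/*
 * Abort all commands whose callback argument matches cmd_cb_arg. A parent
 * request tracks one child Abort command per matching outstanding request;
 * the parent completes once every child has completed, with bit 0 of cdw0
 * set if any abort did not succeed. Matching requests still queued in the
 * qpair are aborted synchronously.
 *
 * A minimal usage sketch (io_qpair, my_io_ctx and abort_done are
 * hypothetical names, not defined in this file):
 *
 *   rc = spdk_nvme_ctrlr_cmd_abort_ext(ctrlr, io_qpair, my_io_ctx,
 *                                      abort_done, NULL);
 */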
719 int
720 spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
721 			      void *cmd_cb_arg,
722 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
723 {
724 	int rc = 0;
725 	struct nvme_request *parent, *child, *tmp;
726 	bool child_failed = false;
727 	int aborted = 0;
728 
729 	if (cmd_cb_arg == NULL) {
730 		return -EINVAL;
731 	}
732 
733 	pthread_mutex_lock(&ctrlr->ctrlr_lock);
734 
735 	if (qpair == NULL) {
736 		qpair = ctrlr->adminq;
737 	}
738 
739 	parent = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
740 	if (parent == NULL) {
741 		pthread_mutex_unlock(&ctrlr->ctrlr_lock);
742 
743 		return -ENOMEM;
744 	}
745 
746 	TAILQ_INIT(&parent->children);
747 	parent->num_children = 0;
748 
749 	parent->cmd.opc = SPDK_NVME_OPC_ABORT;
750 	memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl));
751 
752 	/* Hold the SQID that the requests to abort are associated with.
753 	 * It will be copied to the children.
754 	 *
755 	 * CID is not set here because the parent is not submitted directly
756 	 * and the CID is not known until a request to abort is found.
757 	 */
758 	parent->cmd.cdw10_bits.abort.sqid = qpair->id;
759 
760 	/* This is used to find request to abort. */
761 	parent->user_cb_arg = cmd_cb_arg;
762 
763 	/* Add an abort request for each outstanding request which has cmd_cb_arg
764 	 * as its callback context.
765 	 */
766 	rc = nvme_transport_qpair_iterate_requests(qpair, nvme_request_add_abort, parent);
767 	if (rc != 0) {
768 		/* Free abort requests already added. */
769 		child_failed = true;
770 	}
771 
772 	TAILQ_FOREACH_SAFE(child, &parent->children, child_tailq, tmp) {
773 		if (spdk_likely(!child_failed)) {
774 			rc = _nvme_ctrlr_submit_abort_request(ctrlr, child);
775 			if (spdk_unlikely(rc != 0)) {
776 				child_failed = true;
777 			}
778 		} else {
779 			/* Free remaining abort requests. */
780 			nvme_request_remove_child(parent, child);
781 			nvme_free_request(child);
782 		}
783 	}
784 
785 	if (spdk_likely(!child_failed)) {
786 		/* No error so far: either the abort requests were submitted
787 		 * successfully, or there was no outstanding request to abort.
788 		 *
789 		 * Next, abort any queued requests that have cmd_cb_arg as their
790 		 * callback context.
791 		 */
792 		aborted = nvme_qpair_abort_queued_reqs(qpair, cmd_cb_arg);
793 		if (parent->num_children == 0) {
794 			/* There was no outstanding request to abort. */
795 			if (aborted > 0) {
796 				/* The queued requests were successfully aborted. Hence
797 				 * complete the parent request with success synchronously.
798 				 */
799 				nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
800 						      parent, &parent->parent_status);
801 				nvme_free_request(parent);
802 			} else {
803 				/* There was no queued request to abort. */
804 				rc = -ENOENT;
805 			}
806 		}
807 	} else {
808 		/* Failed to add or submit abort request. */
809 		if (parent->num_children != 0) {
810 			/* Return success because we must wait for those children
811 			 * to complete, but mark the parent request as failed.
812 			 */
813 			parent->parent_status.cdw0 |= 1U;
814 			rc = 0;
815 		}
816 	}
817 
818 	if (rc != 0) {
819 		nvme_free_request(parent);
820 	}
821 
822 	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
823 	return rc;
824 }
825 
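/*
 * Submit a Firmware Commit admin command. The caller's spdk_nvme_fw_commit
 * settings (firmware slot and commit action) are copied into CDW10.
 */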
826 int
827 nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
828 			 const struct spdk_nvme_fw_commit *fw_commit,
829 			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
830 {
831 	struct nvme_request *req;
832 	struct spdk_nvme_cmd *cmd;
833 	int rc;
834 
835 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
836 	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
837 	if (req == NULL) {
838 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
839 		return -ENOMEM;
840 	}
841 
842 	cmd = &req->cmd;
843 	cmd->opc = SPDK_NVME_OPC_FIRMWARE_COMMIT;
844 	memcpy(&cmd->cdw10, fw_commit, sizeof(uint32_t));
845 
846 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
847 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
848 
849 	return rc;
850 
851 }
852 
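/*
 * Submit a Firmware Image Download admin command for one chunk of the image.
 * CDW10 carries the 0's based dword count and CDW11 the dword offset, so
 * both size and offset must be multiples of four bytes.
 */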
853 int
854 nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
855 				 uint32_t size, uint32_t offset, void *payload,
856 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
857 {
858 	struct nvme_request *req;
859 	struct spdk_nvme_cmd *cmd;
860 	int rc;
861 
862 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
863 	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, size, cb_fn, cb_arg, true);
864 	if (req == NULL) {
865 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
866 		return -ENOMEM;
867 	}
868 
869 	cmd = &req->cmd;
870 	cmd->opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD;
871 	cmd->cdw10 = spdk_nvme_bytes_to_numd(size);
872 	cmd->cdw11 = offset >> 2;
873 
874 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
875 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
876 
877 	return rc;
878 }
879 
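/*
 * Submit a Security Receive admin command. SECP, SPSP and NSSF are packed
 * into CDW10 and the transfer length into CDW11; the returned data is copied
 * into the caller's payload buffer. Security Send (below) mirrors this with
 * a host-to-controller payload.
 */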
880 int
881 spdk_nvme_ctrlr_cmd_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
882 				     uint16_t spsp, uint8_t nssf, void *payload,
883 				     uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
884 {
885 	struct nvme_request *req;
886 	struct spdk_nvme_cmd *cmd;
887 	int rc;
888 
889 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
890 	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
891 					      cb_fn, cb_arg, false);
892 	if (req == NULL) {
893 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
894 		return -ENOMEM;
895 	}
896 
897 	cmd = &req->cmd;
898 	cmd->opc = SPDK_NVME_OPC_SECURITY_RECEIVE;
899 	cmd->cdw10_bits.sec_send_recv.nssf = nssf;
900 	cmd->cdw10_bits.sec_send_recv.spsp0 = (uint8_t)spsp;
901 	cmd->cdw10_bits.sec_send_recv.spsp1 = (uint8_t)(spsp >> 8);
902 	cmd->cdw10_bits.sec_send_recv.secp = secp;
903 	cmd->cdw11 = payload_size;
904 
905 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
906 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
907 
908 	return rc;
909 }
910 
911 int
912 spdk_nvme_ctrlr_cmd_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
913 				  uint16_t spsp, uint8_t nssf, void *payload,
914 				  uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
915 {
916 	struct nvme_request *req;
917 	struct spdk_nvme_cmd *cmd;
918 	int rc;
919 
920 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
921 	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
922 					      cb_fn, cb_arg, true);
923 	if (req == NULL) {
924 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
925 		return -ENOMEM;
926 	}
927 
928 	cmd = &req->cmd;
929 	cmd->opc = SPDK_NVME_OPC_SECURITY_SEND;
930 	cmd->cdw10_bits.sec_send_recv.nssf = nssf;
931 	cmd->cdw10_bits.sec_send_recv.spsp0 = (uint8_t)spsp;
932 	cmd->cdw10_bits.sec_send_recv.spsp1 = (uint8_t)(spsp >> 8);
933 	cmd->cdw10_bits.sec_send_recv.secp = secp;
934 	cmd->cdw11 = payload_size;
935 
936 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
937 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
938 
939 	return rc;
940 }
941 
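/*
 * Submit a Sanitize admin command. The caller's spdk_nvme_sanitize settings
 * are copied into CDW10 and cdw11 is passed through unchanged (it carries
 * the overwrite pattern when an overwrite sanitize action is requested).
 */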
942 int
943 nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
944 			struct spdk_nvme_sanitize *sanitize, uint32_t cdw11,
945 			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
946 {
947 	struct nvme_request *req;
948 	struct spdk_nvme_cmd *cmd;
949 	int rc;
950 
951 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
952 	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
953 	if (req == NULL) {
954 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
955 		return -ENOMEM;
956 	}
957 
958 	cmd = &req->cmd;
959 	cmd->opc = SPDK_NVME_OPC_SANITIZE;
960 	cmd->nsid = nsid;
961 	cmd->cdw11 = cdw11;
962 	memcpy(&cmd->cdw10, sanitize, sizeof(cmd->cdw10));
963 
964 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
965 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
966 
967 	return rc;
968 }
969 
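/*
 * Common helper for Directive Send/Receive admin commands. CDW10 holds the
 * 0's based dword count of the payload and CDW11 packs DOPER, DTYPE and
 * DSPEC; the opcode and transfer direction are supplied by the wrappers
 * below.
 */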
970 static int
971 nvme_ctrlr_cmd_directive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
972 			 uint32_t doper, uint32_t dtype, uint32_t dspec,
973 			 void *payload, uint32_t payload_size, uint32_t cdw12,
974 			 uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
975 			 uint16_t opc_type, bool host_to_ctrlr)
976 {
977 	struct nvme_request *req = NULL;
978 	struct spdk_nvme_cmd *cmd = NULL;
979 	int rc;
980 
981 	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
982 	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
983 					      cb_fn, cb_arg, host_to_ctrlr);
984 	if (req == NULL) {
985 		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
986 		return -ENOMEM;
987 	}
988 	cmd = &req->cmd;
989 	cmd->opc = opc_type;
990 	cmd->nsid = nsid;
991 
992 	if ((payload_size >> 2) > 0) {
993 		cmd->cdw10 = (payload_size >> 2) - 1;
994 	}
995 	cmd->cdw11_bits.directive.doper = doper;
996 	cmd->cdw11_bits.directive.dtype = dtype;
997 	cmd->cdw11_bits.directive.dspec = dspec;
998 	cmd->cdw12 = cdw12;
999 	cmd->cdw13 = cdw13;
1000 	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
1001 	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
1002 
1003 	return rc;
1004 }
1005 
1006 int
1007 spdk_nvme_ctrlr_cmd_directive_send(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1008 				   uint32_t doper, uint32_t dtype, uint32_t dspec,
1009 				   void *payload, uint32_t payload_size, uint32_t cdw12,
1010 				   uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1011 {
1012 	return nvme_ctrlr_cmd_directive(ctrlr, nsid, doper, dtype, dspec,
1013 					payload, payload_size, cdw12, cdw13, cb_fn, cb_arg,
1014 					SPDK_NVME_OPC_DIRECTIVE_SEND, true);
1015 }
1016 
1017 int
1018 spdk_nvme_ctrlr_cmd_directive_receive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
1019 				      uint32_t doper, uint32_t dtype, uint32_t dspec,
1020 				      void *payload, uint32_t payload_size, uint32_t cdw12,
1021 				      uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1022 {
1023 	return nvme_ctrlr_cmd_directive(ctrlr, nsid, doper, dtype, dspec,
1024 					payload, payload_size, cdw12, cdw13, cb_fn, cb_arg,
1025 					SPDK_NVME_OPC_DIRECTIVE_RECEIVE, false);
1026 }
1027