/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "nvme_internal.h"

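/*
 * Submit a raw command that carries no data transfer on the given I/O qpair.
 * The caller's 64-byte command is copied into the request as-is; an empty
 * payload is attached, and only the PCIe transport is accepted by this helper.
 */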
int
spdk_nvme_ctrlr_io_cmd_raw_no_payload_build(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cmd *cmd,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_payload payload;

	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
		return -EINVAL;
	}

	memset(&payload, 0, sizeof(payload));
	req = nvme_allocate_request(qpair, &payload, 0, 0, cb_fn, cb_arg);

	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

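/*
 * Submit a caller-built command with a single contiguous data buffer on the
 * given I/O qpair. The command is copied as-is; transport-specific fields
 * (e.g. the data pointer) are filled in at submission time.
 */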
int
spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
			   struct spdk_nvme_qpair *qpair,
			   struct spdk_nvme_cmd *cmd,
			   void *buf, uint32_t len,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request	*req;

	req = nvme_allocate_request_contig(qpair, buf, len, cb_fn, cb_arg);

	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

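/*
 * Same as spdk_nvme_ctrlr_cmd_io_raw(), but with a separate metadata buffer.
 * The metadata length is derived from the data length: len / sector_size
 * logical blocks, times the per-block metadata size of the namespace named
 * in cmd->nsid.
 */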
int
spdk_nvme_ctrlr_cmd_io_raw_with_md(struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_nvme_qpair *qpair,
				   struct spdk_nvme_cmd *cmd,
				   void *buf, uint32_t len, void *md_buf,
				   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	uint32_t md_len = 0;

	payload = NVME_PAYLOAD_CONTIG(buf, md_buf);

	/* Calculate metadata length */
	if (md_buf) {
		struct spdk_nvme_ns *ns = &ctrlr->ns[cmd->nsid - 1];

		assert(ns->sector_size != 0);
		md_len = len / ns->sector_size * ns->md_size;
	}

	req = nvme_allocate_request(qpair, &payload, len, md_len, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

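/*
 * Submit a caller-built admin command. The controller lock is held while the
 * request is allocated and queued on the admin qpair.
 *
 * Example (illustrative sketch only, not part of this file; "identify_done"
 * and "buf" are placeholders supplied by the caller): issuing Identify
 * Controller through the raw admin interface.
 *
 *	struct spdk_nvme_cmd cmd = {};
 *
 *	cmd.opc = SPDK_NVME_OPC_IDENTIFY;
 *	cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_CTRLR;
 *	spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, buf,
 *				      sizeof(struct spdk_nvme_ctrlr_data),
 *				      identify_done, NULL);
 */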
int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd,
			      void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	int			rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_contig(ctrlr->adminq, buf, len, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

int
nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
			uint8_t csi, void *payload, size_t payload_size,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size,
					      cb_fn, cb_arg, false);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_IDENTIFY;
	cmd->cdw10_bits.identify.cns = cns;
	cmd->cdw10_bits.identify.cntid = cntid;
	cmd->cdw11_bits.identify.csi = csi;
	cmd->nsid = nsid;

	return nvme_ctrlr_submit_admin_request(ctrlr, req);
}

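/*
 * Namespace attach and detach both use the Namespace Attachment opcode; they
 * differ only in the SEL value programmed into CDW10. The payload is the list
 * of controllers to attach to or detach from.
 */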
int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
	cmd->nsid = nsid;
	cmd->cdw10_bits.ns_attach.sel = SPDK_NVME_NS_CTRLR_ATTACH;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
	cmd->nsid = nsid;
	cmd->cdw10_bits.ns_attach.sel = SPDK_NVME_NS_CTRLR_DETACH;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ns_data),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
	cmd->cdw10_bits.ns_manage.sel = SPDK_NVME_NS_MANAGEMENT_CREATE;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
			 void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
	cmd->cdw10_bits.ns_manage.sel = SPDK_NVME_NS_MANAGEMENT_DELETE;
	cmd->nsid = nsid;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

int
nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG;
	cmd->dptr.prp.prp1 = prp1;
	cmd->dptr.prp.prp2 = prp2;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FORMAT_NVM;
	cmd->nsid = nsid;
	memcpy(&cmd->cdw10, format, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd->cdw10_bits.set_features.fid = feature;
	cmd->cdw11 = cdw11;
	cmd->cdw12 = cdw12;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd->cdw10_bits.get_features.fid = feature;
	cmd->cdw11 = cdw11;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_get_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				   uint32_t cdw11, void *payload,
				   uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
				   void *cb_arg, uint32_t ns_id)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd->cdw10_bits.get_features.fid = feature;
	cmd->cdw11 = cdw11;
	cmd->nsid = ns_id;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_set_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				   uint32_t cdw11, uint32_t cdw12, void *payload,
				   uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
				   void *cb_arg, uint32_t ns_id)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd->cdw10_bits.set_features.fid = feature;
	cmd->cdw11 = cdw11;
	cmd->cdw12 = cdw12;
	cmd->nsid = ns_id;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

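/*
 * NSQR and NCQR in the Number of Queues feature are 0's based values, so the
 * requested queue count is programmed as num_queues - 1 for both the
 * submission and completion queue counts.
 */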
int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	union spdk_nvme_feat_number_of_queues feat_num_queues;

	feat_num_queues.raw = 0;
	feat_num_queues.bits.nsqr = num_queues - 1;
	feat_num_queues.bits.ncqr = num_queues - 1;

	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, feat_num_queues.raw,
					       0,
					       NULL, 0, cb_fn, cb_arg);
}

int
nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, 0, NULL, 0,
					       cb_fn, cb_arg);
}

int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
				      union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
				      void *cb_arg)
{
	uint32_t cdw11;

	cdw11 = config.raw;
	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, 0,
					       NULL, 0,
					       cb_fn, cb_arg);
}

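/*
 * The Host Identifier feature accepts either an 8-byte host identifier or a
 * 16-byte extended host identifier; EXHID in CDW11 selects which format the
 * payload uses. Any other size is rejected with -EINVAL.
 */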
int
nvme_ctrlr_cmd_set_host_id(struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	union spdk_nvme_feat_host_identifier feat_host_identifier;

	feat_host_identifier.raw = 0;
	if (host_id_size == 16) {
		/* 128-bit extended host identifier */
		feat_host_identifier.bits.exhid = 1;
	} else if (host_id_size == 8) {
		/* 64-bit host identifier */
		feat_host_identifier.bits.exhid = 0;
	} else {
		SPDK_ERRLOG("Invalid host ID size %u\n", host_id_size);
		return -EINVAL;
	}

	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_HOST_IDENTIFIER,
					       feat_host_identifier.raw, 0,
					       host_id, host_id_size, cb_fn, cb_arg);
}

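/*
 * Get Log Page with explicit dword fields. The transfer size is converted to
 * a 0's based number of dwords (NUMD) and split across NUMDL/NUMDU; the byte
 * offset must be dword aligned and is split across LPOL/LPOU. A non-zero
 * offset additionally requires controller support for extended log page data
 * (LPA.EDLP in the identify data).
 */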
int
spdk_nvme_ctrlr_cmd_get_log_page_ext(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				     uint32_t nsid, void *payload, uint32_t payload_size,
				     uint64_t offset, uint32_t cdw10,
				     uint32_t cdw11, uint32_t cdw14,
				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	uint32_t numd, numdl, numdu;
	uint32_t lpol, lpou;
	int rc;

	if (payload_size == 0) {
		return -EINVAL;
	}

	if (offset & 3) {
		return -EINVAL;
	}

	numd = spdk_nvme_bytes_to_numd(payload_size);
	numdl = numd & 0xFFFFu;
	numdu = (numd >> 16) & 0xFFFFu;

	lpol = (uint32_t)offset;
	lpou = (uint32_t)(offset >> 32);

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	if (offset && !ctrlr->cdata.lpa.edlp) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size, cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd->nsid = nsid;
	cmd->cdw10 = cdw10;
	cmd->cdw10_bits.get_log_page.numdl = numdl;
	cmd->cdw10_bits.get_log_page.lid = log_page;

	cmd->cdw11 = cdw11;
	cmd->cdw11_bits.get_log_page.numdu = numdu;
	cmd->cdw12 = lpol;
	cmd->cdw13 = lpou;
	cmd->cdw14 = cdw14;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

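/*
 * Convenience wrapper around spdk_nvme_ctrlr_cmd_get_log_page_ext() with the
 * extra dword fields left at zero.
 *
 * Example (illustrative sketch only, not part of this file; "health_page" and
 * "get_log_done" are placeholders supplied by the caller): reading the
 * SMART / Health Information log page for all namespaces.
 *
 *	spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION,
 *					 SPDK_NVME_GLOBAL_NS_TAG, &health_page,
 *					 sizeof(health_page), 0,
 *					 get_log_done, NULL);
 */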
int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				 uint32_t nsid, void *payload, uint32_t payload_size,
				 uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return spdk_nvme_ctrlr_cmd_get_log_page_ext(ctrlr, log_page, nsid, payload,
			payload_size, offset, 0, 0, 0, cb_fn, cb_arg);
}

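/*
 * Abort commands are throttled to the controller's Abort Command Limit
 * (ACL, a 0's based value from the identify data). Aborts beyond that limit
 * are kept on ctrlr->queued_aborts and resubmitted, one per completion, as
 * outstanding aborts finish.
 */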
static void
nvme_ctrlr_retry_queued_abort(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_request	*next, *tmp;
	int rc;

	if (ctrlr->is_resetting || ctrlr->is_destructed) {
		return;
	}

	STAILQ_FOREACH_SAFE(next, &ctrlr->queued_aborts, stailq, tmp) {
		STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
		ctrlr->outstanding_aborts++;
		rc = nvme_ctrlr_submit_admin_request(ctrlr, next);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to submit queued abort.\n");
			memset(&next->cpl, 0, sizeof(next->cpl));
			next->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
			next->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			next->cpl.status.dnr = 1;
			nvme_complete_request(next->cb_fn, next->cb_arg, next->qpair, next, &next->cpl);
			nvme_free_request(next);
		} else {
			/* A queued abort was submitted successfully, so stop iterating.
			 * The remaining queued aborts are resubmitted as outstanding
			 * aborts complete.
			 */
			break;
		}
	}
}

static int
_nvme_ctrlr_submit_abort_request(struct spdk_nvme_ctrlr *ctrlr,
				 struct nvme_request *req)
{
	/* ACL is a 0's based value. */
	if (ctrlr->outstanding_aborts >= ctrlr->cdata.acl + 1U) {
		STAILQ_INSERT_TAIL(&ctrlr->queued_aborts, req, stailq);
		return 0;
	} else {
		ctrlr->outstanding_aborts++;
		return nvme_ctrlr_submit_admin_request(ctrlr, req);
	}
}

static void
nvme_ctrlr_cmd_abort_cpl(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_request	*req = ctx;
	struct spdk_nvme_ctrlr	*ctrlr;

	ctrlr = req->qpair->ctrlr;

	ctrlr->outstanding_aborts--;
	nvme_ctrlr_retry_queued_abort(ctrlr);

	req->user_cb_fn(req->user_cb_arg, cpl);
}

int
spdk_nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			  uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	int rc;
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (qpair == NULL) {
		qpair = ctrlr->adminq;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_cmd_abort_cpl, NULL);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}
	req->cb_arg = req;
	req->user_cb_fn = cb_fn;
	req->user_cb_arg = cb_arg;

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_ABORT;
	cmd->cdw10_bits.abort.sqid = qpair->id;
	cmd->cdw10_bits.abort.cid = cid;

	rc = _nvme_ctrlr_submit_abort_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

static void
nvme_complete_abort_request(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_request *req = ctx;
	struct nvme_request *parent = req->parent;
	struct spdk_nvme_ctrlr *ctrlr;

	ctrlr = req->qpair->ctrlr;

	ctrlr->outstanding_aborts--;
	nvme_ctrlr_retry_queued_abort(ctrlr);

	nvme_request_remove_child(parent, req);

	if (!spdk_nvme_cpl_is_abort_success(cpl)) {
		parent->parent_status.cdw0 |= 1U;
	}

	if (parent->num_children == 0) {
		nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
				      parent, &parent->parent_status);
		nvme_free_request(parent);
	}
}

static int
nvme_request_add_abort(struct nvme_request *req, void *arg)
{
	struct nvme_request *parent = arg;
	struct nvme_request *child;
	void *cmd_cb_arg;

	cmd_cb_arg = parent->user_cb_arg;

	if (req->cb_arg != cmd_cb_arg &&
	    (req->parent == NULL || req->parent->cb_arg != cmd_cb_arg)) {
		return 0;
	}

	child = nvme_allocate_request_null(parent->qpair->ctrlr->adminq,
					   nvme_complete_abort_request, NULL);
	if (child == NULL) {
		return -ENOMEM;
	}

	child->cb_arg = child;

	child->cmd.opc = SPDK_NVME_OPC_ABORT;
	/* Copy SQID from the parent. */
	child->cmd.cdw10_bits.abort.sqid = parent->cmd.cdw10_bits.abort.sqid;
	child->cmd.cdw10_bits.abort.cid = req->cmd.cid;

	child->parent = parent;

	TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
	parent->num_children++;

	return 0;
}

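/*
 * Abort all commands whose callback argument matches cmd_cb_arg. A parent
 * request tracks one child Abort command per matching outstanding request on
 * the target qpair; the parent completes once every child has completed, and
 * its CDW0 is set non-zero if any child abort failed. Matching requests still
 * sitting in the qpair's queued list are aborted synchronously.
 */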
int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	int rc = 0;
	struct nvme_request *parent, *child, *tmp;
	bool child_failed = false;
	int aborted = 0;

	if (cmd_cb_arg == NULL) {
		return -EINVAL;
	}

	pthread_mutex_lock(&ctrlr->ctrlr_lock);

	if (qpair == NULL) {
		qpair = ctrlr->adminq;
	}

	parent = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (parent == NULL) {
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);

		return -ENOMEM;
	}

	TAILQ_INIT(&parent->children);
	parent->num_children = 0;

	parent->cmd.opc = SPDK_NVME_OPC_ABORT;
	memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl));

	/* Hold the SQID that the requests to abort are associated with.
	 * It will be copied to the children.
	 *
	 * CID is not set here because the parent is not submitted directly
	 * and CID is not determined until a request to abort is found.
	 */
	parent->cmd.cdw10_bits.abort.sqid = qpair->id;

	/* This is used to find the requests to abort. */
	parent->user_cb_arg = cmd_cb_arg;

	/* Add an abort request for each outstanding request that has cmd_cb_arg
	 * as its callback context.
	 */
	rc = nvme_transport_qpair_iterate_requests(qpair, nvme_request_add_abort, parent);
	if (rc != 0) {
		/* Free the abort requests that were already added. */
		child_failed = true;
	}

	TAILQ_FOREACH_SAFE(child, &parent->children, child_tailq, tmp) {
		if (spdk_likely(!child_failed)) {
			rc = _nvme_ctrlr_submit_abort_request(ctrlr, child);
			if (spdk_unlikely(rc != 0)) {
				child_failed = true;
			}
		} else {
			/* Free remaining abort requests. */
			nvme_request_remove_child(parent, child);
			nvme_free_request(child);
		}
	}

	if (spdk_likely(!child_failed)) {
		/* There is no error so far. Abort requests were submitted successfully
		 * or there was no outstanding request to abort.
		 *
		 * Hence abort the queued requests that have cmd_cb_arg as their
		 * callback context next.
		 */
		aborted = nvme_qpair_abort_queued_reqs(qpair, cmd_cb_arg);
		if (parent->num_children == 0) {
			/* There was no outstanding request to abort. */
			if (aborted > 0) {
				/* The queued requests were successfully aborted. Hence
				 * complete the parent request with success synchronously.
				 */
				nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
						      parent, &parent->parent_status);
				nvme_free_request(parent);
			} else {
				/* There was no queued request to abort. */
				rc = -ENOENT;
			}
		}
	} else {
		/* Failed to add or submit an abort request. */
		if (parent->num_children != 0) {
			/* Return success since we must wait for those children
			 * to complete, but mark the parent request as failed.
			 */
			parent->parent_status.cdw0 |= 1U;
			rc = 0;
		}
	}

	if (rc != 0) {
		nvme_free_request(parent);
	}

	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
			 const struct spdk_nvme_fw_commit *fw_commit,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FIRMWARE_COMMIT;
	memcpy(&cmd->cdw10, fw_commit, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
				 uint32_t size, uint32_t offset, void *payload,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, size, cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD;
	cmd->cdw10 = spdk_nvme_bytes_to_numd(size);
	cmd->cdw11 = offset >> 2;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
				     uint16_t spsp, uint8_t nssf, void *payload,
				     uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SECURITY_RECEIVE;
	cmd->cdw10_bits.sec_send_recv.nssf = nssf;
	cmd->cdw10_bits.sec_send_recv.spsp0 = (uint8_t)spsp;
	cmd->cdw10_bits.sec_send_recv.spsp1 = (uint8_t)(spsp >> 8);
	cmd->cdw10_bits.sec_send_recv.secp = secp;
	cmd->cdw11 = payload_size;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
				  uint16_t spsp, uint8_t nssf, void *payload,
				  uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SECURITY_SEND;
	cmd->cdw10_bits.sec_send_recv.nssf = nssf;
	cmd->cdw10_bits.sec_send_recv.spsp0 = (uint8_t)spsp;
	cmd->cdw10_bits.sec_send_recv.spsp1 = (uint8_t)(spsp >> 8);
	cmd->cdw10_bits.sec_send_recv.secp = secp;
	cmd->cdw11 = payload_size;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			struct spdk_nvme_sanitize *sanitize, uint32_t cdw11,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SANITIZE;
	cmd->nsid = nsid;
	cmd->cdw11 = cdw11;
	memcpy(&cmd->cdw10, sanitize, sizeof(cmd->cdw10));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

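/*
 * Shared helper for Directive Send and Directive Receive. CDW10 carries the
 * 0's based number of dwords to transfer, and CDW11 carries the directive
 * operation (DOPER), type (DTYPE) and specific value (DSPEC); the direction
 * of the data transfer is chosen by the caller via host_to_ctrlr.
 */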
static int
nvme_ctrlr_cmd_directive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 uint32_t doper, uint32_t dtype, uint32_t dspec,
			 void *payload, uint32_t payload_size, uint32_t cdw12,
			 uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			 uint16_t opc_type, bool host_to_ctrlr)
{
	struct nvme_request *req = NULL;
	struct spdk_nvme_cmd *cmd = NULL;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, host_to_ctrlr);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}
	cmd = &req->cmd;
	cmd->opc = opc_type;
	cmd->nsid = nsid;

	cmd->cdw10 = (payload_size >> 2) - 1;
	cmd->cdw11_bits.directive.doper = doper;
	cmd->cdw11_bits.directive.dtype = dtype;
	cmd->cdw11_bits.directive.dspec = dspec;
	cmd->cdw12 = cdw12;
	cmd->cdw13 = cdw13;
	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_directive_send(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				   uint32_t doper, uint32_t dtype, uint32_t dspec,
				   void *payload, uint32_t payload_size, uint32_t cdw12,
				   uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_ctrlr_cmd_directive(ctrlr, nsid, doper, dtype, dspec,
					payload, payload_size, cdw12, cdw13, cb_fn, cb_arg,
					SPDK_NVME_OPC_DIRECTIVE_SEND, true);
}

int
spdk_nvme_ctrlr_cmd_directive_receive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				      uint32_t doper, uint32_t dtype, uint32_t dspec,
				      void *payload, uint32_t payload_size, uint32_t cdw12,
				      uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_ctrlr_cmd_directive(ctrlr, nsid, doper, dtype, dspec,
					payload, payload_size, cdw12, cdw13, cb_fn, cb_arg,
					SPDK_NVME_OPC_DIRECTIVE_RECEIVE, false);
}