/* xref: /spdk/lib/nvme/nvme_ctrlr_cmd.c (revision 6a9e923da298dda69cbdaf73eed6fb0e85ec2ad2) */
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "nvme_internal.h"

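/*
 * Submit the given I/O command exactly as provided, without building a data
 * payload for it. The caller is responsible for any data pointers already in
 * the command, which is why this is only supported on the PCIe transport.
 */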
int
spdk_nvme_ctrlr_io_cmd_raw_no_payload_build(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cmd *cmd,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_payload payload;

	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
		return -EINVAL;
	}

	memset(&payload, 0, sizeof(payload));
	req = nvme_allocate_request(qpair, &payload, 0, 0, cb_fn, cb_arg);

	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

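/*
 * Submit a raw I/O command with an optional contiguous data buffer.
 * The command is copied verbatim into the request before submission.
 */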
int
spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
			   struct spdk_nvme_qpair *qpair,
			   struct spdk_nvme_cmd *cmd,
			   void *buf, uint32_t len,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request	*req;

	req = nvme_allocate_request_contig(qpair, buf, len, cb_fn, cb_arg);

	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

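/*
 * Raw I/O command with separate data and metadata buffers. The metadata
 * length is derived from the namespace's sector and metadata sizes, so
 * cmd->nsid must identify a valid, active namespace.
 */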
int
spdk_nvme_ctrlr_cmd_io_raw_with_md(struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_nvme_qpair *qpair,
				   struct spdk_nvme_cmd *cmd,
				   void *buf, uint32_t len, void *md_buf,
				   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	uint32_t md_len = 0;

	payload = NVME_PAYLOAD_CONTIG(buf, md_buf);

	/* Calculate metadata length */
	if (md_buf) {
		struct spdk_nvme_ns *ns = &ctrlr->ns[cmd->nsid - 1];

		assert(ns->sector_size != 0);
		md_len = len / ns->sector_size * ns->md_size;
	}

	req = nvme_allocate_request(qpair, &payload, len, md_len, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

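/*
 * Submit a raw admin command. The controller lock is held around allocation
 * and submission because the admin queue is shared across threads.
 */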
int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd,
			      void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	int			rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_contig(ctrlr->adminq, buf, len, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

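/* Build and submit an Identify command for the given CNS/CNTID/NSID tuple. */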
int
nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
			void *payload, size_t payload_size,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size,
					      cb_fn, cb_arg, false);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_IDENTIFY;
	cmd->cdw10_bits.identify.cns = cns;
	cmd->cdw10_bits.identify.cntid = cntid;
	cmd->nsid = nsid;

	return nvme_ctrlr_submit_admin_request(ctrlr, req);
}

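/*
 * Namespace Attachment with SEL = attach: attach namespace nsid to the
 * controllers listed in payload.
 */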
int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
	cmd->nsid = nsid;
	cmd->cdw10_bits.ns_attach.sel = SPDK_NVME_NS_CTRLR_ATTACH;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

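/* Namespace Attachment with SEL = detach; the mirror of the attach path above. */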
int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
	cmd->nsid = nsid;
	cmd->cdw10_bits.ns_attach.sel = SPDK_NVME_NS_CTRLR_DETACH;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

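/*
 * Namespace Management create. The new namespace ID is returned by the
 * controller in cdw0 of the completion.
 */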
int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ns_data),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
	cmd->cdw10_bits.ns_manage.sel = SPDK_NVME_NS_MANAGEMENT_CREATE;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

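/* Namespace Management delete for the given nsid; no payload is required. */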
int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
			 void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
	cmd->cdw10_bits.ns_manage.sel = SPDK_NVME_NS_MANAGEMENT_DELETE;
	cmd->nsid = nsid;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

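/*
 * Doorbell Buffer Config: point the controller at host-memory shadow
 * doorbell and EventIdx buffers (prp1/prp2). Used with emulated controllers
 * that support this optional admin command.
 */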
int
nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG;
	cmd->dptr.prp.prp1 = prp1;
	cmd->dptr.prp.prp2 = prp2;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

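/*
 * Format NVM. The caller-provided format descriptor occupies cdw10, which
 * is why only sizeof(uint32_t) bytes are copied from it.
 */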
int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FORMAT_NVM;
	cmd->nsid = nsid;
	memcpy(&cmd->cdw10, format, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

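/*
 * Set Features with an optional data payload; cdw11/cdw12 carry the
 * feature-specific arguments.
 */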
int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd->cdw10_bits.set_features.fid = feature;
	cmd->cdw11 = cdw11;
	cmd->cdw12 = cdw12;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

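/* Get Features; the current feature value is returned in cdw0 of the completion. */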
int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd->cdw10_bits.get_features.fid = feature;
	cmd->cdw11 = cdw11;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_get_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				   uint32_t cdw11, void *payload,
				   uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
				   void *cb_arg, uint32_t ns_id)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd->cdw10_bits.get_features.fid = feature;
	cmd->cdw11 = cdw11;
	cmd->nsid = ns_id;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_set_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				   uint32_t cdw11, uint32_t cdw12, void *payload,
				   uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
				   void *cb_arg, uint32_t ns_id)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd->cdw10_bits.set_features.fid = feature;
	cmd->cdw11 = cdw11;
	cmd->cdw12 = cdw12;
	cmd->nsid = ns_id;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

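/*
 * Request num_queues I/O submission and completion queues via the Number of
 * Queues feature. NSQR/NCQR are 0's based values, hence the subtraction.
 */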
int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	union spdk_nvme_feat_number_of_queues feat_num_queues;

	feat_num_queues.raw = 0;
	feat_num_queues.bits.nsqr = num_queues - 1;
	feat_num_queues.bits.ncqr = num_queues - 1;

	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES,
					       feat_num_queues.raw, 0,
					       NULL, 0, cb_fn, cb_arg);
}

int
nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, 0, NULL, 0,
					       cb_fn, cb_arg);
}

int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
				      union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
				      void *cb_arg)
{
	uint32_t cdw11;

	cdw11 = config.raw;
	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION,
					       cdw11, 0, NULL, 0,
					       cb_fn, cb_arg);
}

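/*
 * Set the Host Identifier feature. Only 8-byte (64-bit) and 16-byte
 * (128-bit extended) host IDs are valid; EXHID selects between them.
 */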
int
nvme_ctrlr_cmd_set_host_id(struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	union spdk_nvme_feat_host_identifier feat_host_identifier;

	feat_host_identifier.raw = 0;
	if (host_id_size == 16) {
		/* 128-bit extended host identifier */
		feat_host_identifier.bits.exhid = 1;
	} else if (host_id_size == 8) {
		/* 64-bit host identifier */
		feat_host_identifier.bits.exhid = 0;
	} else {
		SPDK_ERRLOG("Invalid host ID size %u\n", host_id_size);
		return -EINVAL;
	}

	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_HOST_IDENTIFIER,
					       feat_host_identifier.raw, 0,
					       host_id, host_id_size, cb_fn, cb_arg);
}

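/*
 * Get Log Page with explicit dword control. NUMD is a 0's based dword
 * count, the offset must be dword-aligned, and a non-zero offset requires
 * controller support for extended data (LPA.EDLP).
 */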
int
spdk_nvme_ctrlr_cmd_get_log_page_ext(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				     uint32_t nsid, void *payload, uint32_t payload_size,
				     uint64_t offset, uint32_t cdw10,
				     uint32_t cdw11, uint32_t cdw14,
				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	uint32_t numd, numdl, numdu;
	uint32_t lpol, lpou;
	int rc;

	if (payload_size == 0) {
		return -EINVAL;
	}

	if (offset & 3) {
		return -EINVAL;
	}

	numd = payload_size / sizeof(uint32_t) - 1u;
	numdl = numd & 0xFFFFu;
	numdu = (numd >> 16) & 0xFFFFu;

	lpol = (uint32_t)offset;
	lpou = (uint32_t)(offset >> 32);

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	if (offset && !ctrlr->cdata.lpa.edlp) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size, cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd->nsid = nsid;
	cmd->cdw10 = cdw10;
	cmd->cdw10_bits.get_log_page.numdl = numdl;
	cmd->cdw10_bits.get_log_page.lid = log_page;

	cmd->cdw11 = cdw11;
	cmd->cdw11_bits.get_log_page.numdu = numdu;
	cmd->cdw12 = lpol;
	cmd->cdw13 = lpou;
	cmd->cdw14 = cdw14;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				 uint32_t nsid, void *payload, uint32_t payload_size,
				 uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return spdk_nvme_ctrlr_cmd_get_log_page_ext(ctrlr, log_page, nsid, payload,
			payload_size, offset, 0, 0, 0, cb_fn, cb_arg);
}

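/*
 * Resubmit aborts that were queued because the controller's abort limit
 * had been reached. Called from the abort completion paths once an
 * outstanding slot frees up.
 */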
static void
nvme_ctrlr_retry_queued_abort(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_request	*next, *tmp;
	int rc;

	if (ctrlr->is_resetting || ctrlr->is_destructed) {
		return;
	}

	STAILQ_FOREACH_SAFE(next, &ctrlr->queued_aborts, stailq, tmp) {
		STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
		ctrlr->outstanding_aborts++;
		rc = nvme_ctrlr_submit_admin_request(ctrlr, next);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to submit queued abort.\n");
			memset(&next->cpl, 0, sizeof(next->cpl));
			next->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
			next->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			next->cpl.status.dnr = 1;
			nvme_complete_request(next->cb_fn, next->cb_arg, next->qpair, next, &next->cpl);
			nvme_free_request(next);
		} else {
			/* If the first abort succeeds, stop iterating. The remaining
			 * queued aborts will be retried as this one completes.
			 */
			break;
		}
	}
}

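/*
 * Submit the abort immediately if we are under the controller's Abort
 * Command Limit; otherwise queue it for nvme_ctrlr_retry_queued_abort().
 */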
static int
_nvme_ctrlr_submit_abort_request(struct spdk_nvme_ctrlr *ctrlr,
				 struct nvme_request *req)
{
	/* ACL is a 0's based value. */
	if (ctrlr->outstanding_aborts >= ctrlr->cdata.acl + 1U) {
		STAILQ_INSERT_TAIL(&ctrlr->queued_aborts, req, stailq);
		return 0;
	} else {
		ctrlr->outstanding_aborts++;
		return nvme_ctrlr_submit_admin_request(ctrlr, req);
	}
}

static void
nvme_ctrlr_cmd_abort_cpl(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_request	*req = ctx;
	struct spdk_nvme_ctrlr	*ctrlr;

	ctrlr = req->qpair->ctrlr;

	ctrlr->outstanding_aborts--;
	nvme_ctrlr_retry_queued_abort(ctrlr);

	req->user_cb_fn(req->user_cb_arg, cpl);
}

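/*
 * Abort a specific command, identified by its CID and the submission queue
 * it was issued on. A NULL qpair targets the admin queue. Abort itself is
 * an admin command, so the request is always allocated on the admin queue.
 */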
int
spdk_nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			  uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	int rc;
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (qpair == NULL) {
		qpair = ctrlr->adminq;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_cmd_abort_cpl, NULL);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}
	req->cb_arg = req;
	req->user_cb_fn = cb_fn;
	req->user_cb_arg = cb_arg;

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_ABORT;
	cmd->cdw10_bits.abort.sqid = qpair->id;
	cmd->cdw10_bits.abort.cid = cid;

	rc = _nvme_ctrlr_submit_abort_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

static void
nvme_complete_abort_request(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_request *req = ctx;
	struct nvme_request *parent = req->parent;
	struct spdk_nvme_ctrlr *ctrlr;

	ctrlr = req->qpair->ctrlr;

	ctrlr->outstanding_aborts--;
	nvme_ctrlr_retry_queued_abort(ctrlr);

	nvme_request_remove_child(parent, req);

	if (!spdk_nvme_cpl_is_abort_success(cpl)) {
		parent->parent_status.cdw0 |= 1U;
	}

	if (parent->num_children == 0) {
		nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
				      parent, &parent->parent_status);
		nvme_free_request(parent);
	}
}

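/*
 * Iterator callback for nvme_transport_qpair_iterate_requests(): create a
 * child abort request for each outstanding request whose callback context
 * matches the one held in the parent, and link it to the parent.
 */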
static int
nvme_request_add_abort(struct nvme_request *req, void *arg)
{
	struct nvme_request *parent = arg;
	struct nvme_request *child;
	void *cmd_cb_arg;

	cmd_cb_arg = parent->user_cb_arg;

	if (req->cb_arg != cmd_cb_arg &&
	    (req->parent == NULL || req->parent->cb_arg != cmd_cb_arg)) {
		return 0;
	}

	child = nvme_allocate_request_null(parent->qpair->ctrlr->adminq,
					   nvme_complete_abort_request, NULL);
	if (child == NULL) {
		return -ENOMEM;
	}

	child->cb_arg = child;

	child->cmd.opc = SPDK_NVME_OPC_ABORT;
	/* Copy SQID from the parent. */
	child->cmd.cdw10_bits.abort.sqid = parent->cmd.cdw10_bits.abort.sqid;
	child->cmd.cdw10_bits.abort.cid = req->cmd.cid;

	child->parent = parent;

	TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
	parent->num_children++;

	return 0;
}

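/*
 * Abort every command, outstanding or queued, that was submitted with
 * cmd_cb_arg as its callback context on the given qpair. Outstanding
 * commands get one child abort each; queued commands are completed
 * locally without contacting the controller.
 */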
int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	int rc = 0;
	struct nvme_request *parent, *child, *tmp;
	bool child_failed = false;
	int aborted = 0;

	if (cmd_cb_arg == NULL) {
		return -EINVAL;
	}

	pthread_mutex_lock(&ctrlr->ctrlr_lock);

	if (qpair == NULL) {
		qpair = ctrlr->adminq;
	}

	parent = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (parent == NULL) {
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);

		return -ENOMEM;
	}

	TAILQ_INIT(&parent->children);
	parent->num_children = 0;

	parent->cmd.opc = SPDK_NVME_OPC_ABORT;
	memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl));

	/* Hold the SQID that the requests to abort are associated with.
	 * This will be copied to the children.
	 *
	 * CID is not set here because the parent is not submitted directly
	 * and the CID is not known until a request to abort is found.
	 */
	parent->cmd.cdw10_bits.abort.sqid = qpair->id;

	/* This is used to find the requests to abort. */
	parent->user_cb_arg = cmd_cb_arg;

	/* Add an abort request for each outstanding request that has cmd_cb_arg
	 * as its callback context.
	 */
	rc = nvme_transport_qpair_iterate_requests(qpair, nvme_request_add_abort, parent);
	if (rc != 0) {
		/* Free the abort requests already added. */
		child_failed = true;
	}

	TAILQ_FOREACH_SAFE(child, &parent->children, child_tailq, tmp) {
		if (spdk_likely(!child_failed)) {
			rc = _nvme_ctrlr_submit_abort_request(ctrlr, child);
			if (spdk_unlikely(rc != 0)) {
				child_failed = true;
			}
		} else {
			/* Free remaining abort requests. */
			nvme_request_remove_child(parent, child);
			nvme_free_request(child);
		}
	}

	if (spdk_likely(!child_failed)) {
		/* There is no error so far. Abort requests were submitted successfully
		 * or there was no outstanding request to abort.
		 *
		 * Next, abort any queued requests that have cmd_cb_arg as their
		 * callback context.
		 */
		aborted = nvme_qpair_abort_queued_reqs(qpair, cmd_cb_arg);
		if (parent->num_children == 0) {
			/* There was no outstanding request to abort. */
			if (aborted > 0) {
				/* The queued requests were successfully aborted. Hence
				 * complete the parent request with success synchronously.
				 */
				nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
						      parent, &parent->parent_status);
				nvme_free_request(parent);
			} else {
				/* There was no queued request to abort. */
				rc = -ENOENT;
			}
		}
	} else {
		/* Failed to add or submit an abort request. */
		if (parent->num_children != 0) {
			/* Return success since we must wait for those children
			 * to complete, but set the parent request to failure.
			 */
			parent->parent_status.cdw0 |= 1U;
			rc = 0;
		}
	}

	if (rc != 0) {
		nvme_free_request(parent);
	}

	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

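/*
 * Firmware Commit. The fw_commit descriptor maps directly onto cdw10,
 * selecting the firmware slot and commit action.
 */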
int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
			 const struct spdk_nvme_fw_commit *fw_commit,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FIRMWARE_COMMIT;
	memcpy(&cmd->cdw10, fw_commit, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

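/*
 * Firmware Image Download. NUMD and OFST are expressed in dwords, so both
 * size and offset are expected to be multiples of 4 bytes.
 */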
int
nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
				 uint32_t size, uint32_t offset, void *payload,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, size, cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD;
	cmd->cdw10 = (size >> 2) - 1;
	cmd->cdw11 = offset >> 2;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

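/*
 * Security Receive (and Security Send below): transfer a security protocol
 * payload identified by SECP/SPSP/NSSF. cdw11 carries the transfer length
 * in bytes.
 */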
int
spdk_nvme_ctrlr_cmd_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
				     uint16_t spsp, uint8_t nssf, void *payload,
				     uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SECURITY_RECEIVE;
	cmd->cdw10_bits.sec_send_recv.nssf = nssf;
	cmd->cdw10_bits.sec_send_recv.spsp0 = (uint8_t)spsp;
	cmd->cdw10_bits.sec_send_recv.spsp1 = (uint8_t)(spsp >> 8);
	cmd->cdw10_bits.sec_send_recv.secp = secp;
	cmd->cdw11 = payload_size;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
				  uint16_t spsp, uint8_t nssf, void *payload,
				  uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SECURITY_SEND;
	cmd->cdw10_bits.sec_send_recv.nssf = nssf;
	cmd->cdw10_bits.sec_send_recv.spsp0 = (uint8_t)spsp;
	cmd->cdw10_bits.sec_send_recv.spsp1 = (uint8_t)(spsp >> 8);
	cmd->cdw10_bits.sec_send_recv.secp = secp;
	cmd->cdw11 = payload_size;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

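/*
 * Sanitize. The sanitize descriptor is copied into cdw10; cdw11 (e.g. the
 * overwrite pattern) is passed through from the caller.
 */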
int
nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			struct spdk_nvme_sanitize *sanitize, uint32_t cdw11,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SANITIZE;
	cmd->nsid = nsid;
	cmd->cdw11 = cdw11;
	memcpy(&cmd->cdw10, sanitize, sizeof(cmd->cdw10));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}
967