/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2015 Intel Corporation. All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 */

#include "nvme_internal.h"
#include "spdk/nvme.h"

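/*
 * Submit a raw I/O command without building a data payload: the request is
 * allocated with a zeroed payload of length 0, so no PRP/SGL is constructed
 * by the driver and the caller's command is copied through verbatim.  Only
 * the PCIe transport is accepted here; other transports get -EINVAL.
 */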
int
spdk_nvme_ctrlr_io_cmd_raw_no_payload_build(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cmd *cmd,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_payload payload;

	if (ctrlr->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
		return -EINVAL;
	}

	memset(&payload, 0, sizeof(payload));
	req = nvme_allocate_request(qpair, &payload, 0, 0, cb_fn, cb_arg);

	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

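/*
 * Submit an arbitrary I/O command with a contiguous data buffer of 'len'
 * bytes.  The caller-provided spdk_nvme_cmd is copied into the request
 * unmodified; the transport builds the data pointer when the request is
 * submitted on 'qpair'.
 */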
int
spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
			   struct spdk_nvme_qpair *qpair,
			   struct spdk_nvme_cmd *cmd,
			   void *buf, uint32_t len,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request	*req;

	req = nvme_allocate_request_contig(qpair, buf, len, cb_fn, cb_arg);

	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

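/*
 * Same as spdk_nvme_ctrlr_cmd_io_raw(), but with a separate metadata buffer.
 * The metadata length is derived from the data length: len / sector_size
 * blocks, each contributing md_size bytes of metadata.  cmd->nsid must
 * therefore name a valid, active namespace whenever md_buf is non-NULL.
 */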
int
spdk_nvme_ctrlr_cmd_io_raw_with_md(struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_nvme_qpair *qpair,
				   struct spdk_nvme_cmd *cmd,
				   void *buf, uint32_t len, void *md_buf,
				   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	uint32_t md_len = 0;

	payload = NVME_PAYLOAD_CONTIG(buf, md_buf);

	/* Calculate metadata length */
	if (md_buf) {
		struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, cmd->nsid);

		assert(ns != NULL);
		assert(ns->sector_size != 0);
		md_len = len / ns->sector_size * ns->md_size;
	}

	req = nvme_allocate_request(qpair, &payload, len, md_len, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

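/*
 * Scatter-gather variant of the raw I/O submission path.  The caller supplies
 * reset_sgl/next_sge callbacks describing the data buffer; both callbacks are
 * mandatory.  Metadata length is computed from the namespace geometry the
 * same way as in spdk_nvme_ctrlr_cmd_io_raw_with_md().
 */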
int
spdk_nvme_ctrlr_cmd_iov_raw_with_md(struct spdk_nvme_ctrlr *ctrlr,
				    struct spdk_nvme_qpair *qpair,
				    struct spdk_nvme_cmd *cmd,
				    uint32_t len, void *md_buf,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
				    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				    spdk_nvme_req_next_sge_cb next_sge_fn)
{
	struct nvme_request *req;
	struct nvme_payload payload;
	uint32_t md_len = 0;

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, md_buf);

	/* Calculate metadata length */
	if (md_buf) {
		struct spdk_nvme_ns *ns = spdk_nvme_ctrlr_get_ns(ctrlr, cmd->nsid);

		assert(ns != NULL);
		assert(ns->sector_size != 0);
		md_len = len / ns->sector_size * ns->md_size;
	}

	req = nvme_allocate_request(qpair, &payload, len, md_len, cb_fn, cb_arg);
	if (req == NULL) {
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	return nvme_qpair_submit_request(qpair, req);
}

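/*
 * Submit an arbitrary admin command on the controller's admin queue.  The
 * controller lock is held across allocation and submission so admin
 * submissions from multiple threads do not interleave.
 *
 * Illustrative sketch (not taken from this file): issuing Identify Controller
 * through this raw interface might look like
 *
 *	struct spdk_nvme_cmd cmd = {};
 *
 *	cmd.opc = SPDK_NVME_OPC_IDENTIFY;
 *	cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_CTRLR;
 *	spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, buf, 4096, cb_fn, cb_arg);
 *
 * where buf is a caller-owned 4 KiB buffer and cb_fn/cb_arg are the caller's
 * completion callback and context.
 */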
int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd,
			      void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	int			rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_contig(ctrlr->adminq, buf, len, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

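/*
 * Build and submit an Identify command.  CNS and CNTID go in CDW10, CSI in
 * CDW11, and the payload is staged through a driver-owned buffer by
 * nvme_allocate_request_user_copy() (host_to_controller = false, since
 * Identify returns data to the host).
 */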
int
nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
			uint8_t csi, void *payload, size_t payload_size,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size,
					      cb_fn, cb_arg, false);
	if (req == NULL) {
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_IDENTIFY;
	cmd->cdw10_bits.identify.cns = cns;
	cmd->cdw10_bits.identify.cntid = cntid;
	cmd->cdw11_bits.identify.csi = csi;
	cmd->nsid = nsid;

	return nvme_ctrlr_submit_admin_request(ctrlr, req);
}

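/*
 * The next two functions issue Namespace Attachment with SEL = attach and
 * SEL = detach respectively.  In both cases the payload is an
 * spdk_nvme_ctrlr_list identifying the controllers to attach to or detach
 * from the namespace named by 'nsid'.
 */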
int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
	cmd->nsid = nsid;
	cmd->cdw10_bits.ns_attach.sel = SPDK_NVME_NS_CTRLR_ATTACH;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ctrlr_list),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_ATTACHMENT;
	cmd->nsid = nsid;
	cmd->cdw10_bits.ns_attach.sel = SPDK_NVME_NS_CTRLR_DETACH;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

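/*
 * Namespace Management: create takes an spdk_nvme_ns_data payload describing
 * the namespace to create (no NSID is set here, since the controller assigns
 * it), while delete carries no payload and names the target namespace in
 * cmd->nsid.
 */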
int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, sizeof(struct spdk_nvme_ns_data),
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
	cmd->cdw10_bits.ns_manage.sel = SPDK_NVME_NS_MANAGEMENT_CREATE;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
			 void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
	cmd->cdw10_bits.ns_manage.sel = SPDK_NVME_NS_MANAGEMENT_DELETE;
	cmd->nsid = nsid;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

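/*
 * Doorbell Buffer Config: prp1 and prp2 are written directly into the
 * command's PRP entries.  Per the NVMe specification these carry the
 * host-allocated shadow doorbell buffer and EventIdx buffer addresses.
 */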
int
nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;
	int					rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG;
	cmd->dptr.prp.prp1 = prp1;
	cmd->dptr.prp.prp2 = prp2;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

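/*
 * Format NVM: the caller-supplied spdk_nvme_format structure is exactly one
 * dword and is copied verbatim into CDW10.
 */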
int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FORMAT_NVM;
	cmd->nsid = nsid;
	memcpy(&cmd->cdw10, format, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

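/*
 * Set Features / Get Features helpers.  The feature identifier goes in CDW10;
 * cdw11 (and cdw12 for Set Features) are passed through untouched, and any
 * data buffer is staged through a driver-owned copy.  The *_ns variants
 * additionally set cmd->nsid for namespace-specific features.
 */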
int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd->cdw10_bits.set_features.fid = feature;
	cmd->cdw11 = cdw11;
	cmd->cdw12 = cdw12;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd->cdw10_bits.get_features.fid = feature;
	cmd->cdw11 = cdw11;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_get_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				   uint32_t cdw11, void *payload,
				   uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
				   void *cb_arg, uint32_t ns_id)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd->cdw10_bits.get_features.fid = feature;
	cmd->cdw11 = cdw11;
	cmd->nsid = ns_id;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_set_feature_ns(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				   uint32_t cdw11, uint32_t cdw12, void *payload,
				   uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
				   void *cb_arg, uint32_t ns_id)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size, cb_fn, cb_arg,
					      true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd->cdw10_bits.set_features.fid = feature;
	cmd->cdw11 = cdw11;
	cmd->cdw12 = cdw12;
	cmd->nsid = ns_id;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	union spdk_nvme_feat_number_of_queues feat_num_queues;

	feat_num_queues.raw = 0;
	feat_num_queues.bits.nsqr = num_queues - 1;
	feat_num_queues.bits.ncqr = num_queues - 1;

	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES,
					       feat_num_queues.raw, 0, NULL, 0, cb_fn, cb_arg);
}

int
nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_NUMBER_OF_QUEUES, 0, NULL, 0,
					       cb_fn, cb_arg);
}

int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
				      union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
				      void *cb_arg)
{
	uint32_t cdw11;

	cdw11 = config.raw;
	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, 0,
					       NULL, 0, cb_fn, cb_arg);
}

int
nvme_ctrlr_cmd_set_host_id(struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	union spdk_nvme_feat_host_identifier feat_host_identifier;

	feat_host_identifier.raw = 0;
	if (host_id_size == 16) {
		/* 128-bit extended host identifier */
		feat_host_identifier.bits.exhid = 1;
	} else if (host_id_size == 8) {
		/* 64-bit host identifier */
		feat_host_identifier.bits.exhid = 0;
	} else {
		SPDK_ERRLOG("Invalid host ID size %u\n", host_id_size);
		return -EINVAL;
	}

	return spdk_nvme_ctrlr_cmd_set_feature(ctrlr, SPDK_NVME_FEAT_HOST_IDENTIFIER,
					       feat_host_identifier.raw, 0,
					       host_id, host_id_size, cb_fn, cb_arg);
}

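/*
 * Get Log Page with explicit control over CDW10/11/14.  The transfer size is
 * expressed as a 0's based dword count split across NUMDL (CDW10) and NUMDU
 * (CDW11); the byte offset is split across LPOL (CDW12) and LPOU (CDW13) and
 * must be dword aligned.  A non-zero offset additionally requires the
 * controller to report extended data support (LPA.EDLP).
 */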
int
spdk_nvme_ctrlr_cmd_get_log_page_ext(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				     uint32_t nsid, void *payload, uint32_t payload_size,
				     uint64_t offset, uint32_t cdw10,
				     uint32_t cdw11, uint32_t cdw14,
				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	uint32_t numd, numdl, numdu;
	uint32_t lpol, lpou;
	int rc;

	if (payload_size == 0) {
		return -EINVAL;
	}

	if (offset & 3) {
		return -EINVAL;
	}

	numd = spdk_nvme_bytes_to_numd(payload_size);
	numdl = numd & 0xFFFFu;
	numdu = (numd >> 16) & 0xFFFFu;

	lpol = (uint32_t)offset;
	lpou = (uint32_t)(offset >> 32);

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);

	if (offset && !ctrlr->cdata.lpa.edlp) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -EINVAL;
	}

	req = nvme_allocate_request_user_copy(ctrlr->adminq,
					      payload, payload_size, cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	cmd->nsid = nsid;
	cmd->cdw10 = cdw10;
	cmd->cdw10_bits.get_log_page.numdl = numdl;
	cmd->cdw10_bits.get_log_page.lid = log_page;

	cmd->cdw11 = cdw11;
	cmd->cdw11_bits.get_log_page.numdu = numdu;
	cmd->cdw12 = lpol;
	cmd->cdw13 = lpou;
	cmd->cdw14 = cdw14;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				 uint32_t nsid, void *payload, uint32_t payload_size,
				 uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return spdk_nvme_ctrlr_cmd_get_log_page_ext(ctrlr, log_page, nsid, payload,
			payload_size, offset, 0, 0, 0, cb_fn, cb_arg);
}

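/*
 * Abort commands are throttled to the controller's Abort Command Limit (see
 * _nvme_ctrlr_submit_abort_request() below).  When an outstanding abort
 * completes, this helper pops queued aborts and resubmits them until one
 * submission succeeds; any abort whose resubmission fails is completed in
 * software with an internal device error status.
 */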
static void
nvme_ctrlr_retry_queued_abort(struct spdk_nvme_ctrlr *ctrlr)
{
	struct nvme_request	*next, *tmp;
	int rc;

	if (ctrlr->is_resetting || ctrlr->is_destructed || ctrlr->is_failed) {
		/* Don't resubmit aborts if ctrlr is failing */
		return;
	}

	if (spdk_nvme_ctrlr_get_admin_qp_failure_reason(ctrlr) != SPDK_NVME_QPAIR_FAILURE_NONE) {
		/* Don't resubmit aborts if admin qpair is failed */
		return;
	}

	STAILQ_FOREACH_SAFE(next, &ctrlr->queued_aborts, stailq, tmp) {
		STAILQ_REMOVE_HEAD(&ctrlr->queued_aborts, stailq);
		ctrlr->outstanding_aborts++;
		rc = nvme_ctrlr_submit_admin_request(ctrlr, next);
		if (rc < 0) {
			SPDK_ERRLOG("Failed to submit queued abort.\n");
			memset(&next->cpl, 0, sizeof(next->cpl));
			next->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
			next->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			next->cpl.status.dnr = 1;
			nvme_complete_request(next->cb_fn, next->cb_arg, next->qpair, next, &next->cpl);
		} else {
			/* If the first abort succeeds, stop iterating. */
			break;
		}
	}
}

static int
_nvme_ctrlr_submit_abort_request(struct spdk_nvme_ctrlr *ctrlr,
				 struct nvme_request *req)
{
	/* ACL is a 0's based value. */
	if (ctrlr->outstanding_aborts >= ctrlr->cdata.acl + 1U) {
		STAILQ_INSERT_TAIL(&ctrlr->queued_aborts, req, stailq);
		return 0;
	} else {
		ctrlr->outstanding_aborts++;
		return nvme_ctrlr_submit_admin_request(ctrlr, req);
	}
}

static void
nvme_ctrlr_cmd_abort_cpl(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_request	*req = ctx;
	struct spdk_nvme_ctrlr	*ctrlr;

	ctrlr = req->qpair->ctrlr;

	assert(ctrlr->outstanding_aborts > 0);
	ctrlr->outstanding_aborts--;
	nvme_ctrlr_retry_queued_abort(ctrlr);

	req->user_cb_fn(req->user_cb_arg, cpl);
}

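/*
 * Abort a single command identified by (sqid, cid).  The abort itself is an
 * admin command; the original caller's callback is stashed in
 * user_cb_fn/user_cb_arg and invoked from nvme_ctrlr_cmd_abort_cpl() once the
 * abort completes, after the outstanding-abort accounting has been updated.
 */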
int
spdk_nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			  uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	int rc;
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	if (qpair == NULL) {
		qpair = ctrlr->adminq;
	}

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, nvme_ctrlr_cmd_abort_cpl, NULL);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}
	req->cb_arg = req;
	req->user_cb_fn = cb_fn;
	req->user_cb_arg = cb_arg;

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_ABORT;
	cmd->cdw10_bits.abort.sqid = qpair->id;
	cmd->cdw10_bits.abort.cid = cid;

	rc = _nvme_ctrlr_submit_abort_request(ctrlr, req);

	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

static void
nvme_complete_abort_request(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_request *req = ctx;
	struct nvme_request *parent = req->parent;
	struct spdk_nvme_ctrlr *ctrlr;

	ctrlr = req->qpair->ctrlr;

	assert(ctrlr->outstanding_aborts > 0);
	ctrlr->outstanding_aborts--;
	nvme_ctrlr_retry_queued_abort(ctrlr);

	nvme_request_remove_child(parent, req);

	if (!spdk_nvme_cpl_is_abort_success(cpl)) {
		parent->parent_status.cdw0 |= 1U;
	}

	if (parent->num_children == 0) {
		nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
				      parent, &parent->parent_status);
	}
}

static int
nvme_request_add_abort(struct nvme_request *req, void *arg)
{
	struct nvme_request *parent = arg;
	struct nvme_request *child;
	void *cmd_cb_arg;

	cmd_cb_arg = parent->user_cb_arg;

	if (req->cb_arg != cmd_cb_arg &&
	    (req->parent == NULL || req->parent->cb_arg != cmd_cb_arg)) {
		return 0;
	}

	child = nvme_allocate_request_null(parent->qpair->ctrlr->adminq,
					   nvme_complete_abort_request, NULL);
	if (child == NULL) {
		return -ENOMEM;
	}

	child->cb_arg = child;

	child->cmd.opc = SPDK_NVME_OPC_ABORT;
	/* Copy SQID from the parent. */
	child->cmd.cdw10_bits.abort.sqid = parent->cmd.cdw10_bits.abort.sqid;
	child->cmd.cdw10_bits.abort.cid = req->cmd.cid;

	child->parent = parent;

	TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);
	parent->num_children++;

	return 0;
}

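/*
 * Abort every command, outstanding or queued, that was submitted with
 * cmd_cb_arg as its callback argument.  A payload-less parent request tracks
 * the overall operation: one child Abort command is created per matching
 * outstanding request (nvme_request_add_abort()), queued requests are aborted
 * directly in software, and the parent completes once all children have
 * finished.  Bit 0 of CDW0 in the parent's status is set if any abort did not
 * succeed.
 */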
int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	int rc = 0;
	struct nvme_request *parent, *child, *tmp;
	bool child_failed = false;
	int aborted = 0;

	if (cmd_cb_arg == NULL) {
		return -EINVAL;
	}

	pthread_mutex_lock(&ctrlr->ctrlr_lock);

	if (qpair == NULL) {
		qpair = ctrlr->adminq;
	}

	parent = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (parent == NULL) {
		pthread_mutex_unlock(&ctrlr->ctrlr_lock);

		return -ENOMEM;
	}

	TAILQ_INIT(&parent->children);
	parent->num_children = 0;

	parent->cmd.opc = SPDK_NVME_OPC_ABORT;
	memset(&parent->parent_status, 0, sizeof(struct spdk_nvme_cpl));

	/* Hold the SQID that the requests to abort are associated with.
	 * This will be copied to the children.
	 *
	 * CID is not set here because the parent is not submitted directly
	 * and CID is not determined until a request to abort is found.
	 */
	parent->cmd.cdw10_bits.abort.sqid = qpair->id;

	/* This is used to find requests to abort. */
	parent->user_cb_arg = cmd_cb_arg;

	/* Add an abort request for each outstanding request which has cmd_cb_arg
	 * as its callback context.
	 */
	rc = nvme_transport_qpair_iterate_requests(qpair, nvme_request_add_abort, parent);
	if (rc != 0) {
		/* Free abort requests already added. */
		child_failed = true;
	}

	TAILQ_FOREACH_SAFE(child, &parent->children, child_tailq, tmp) {
		if (spdk_likely(!child_failed)) {
			rc = _nvme_ctrlr_submit_abort_request(ctrlr, child);
			if (spdk_unlikely(rc != 0)) {
				child_failed = true;
			}
		} else {
			/* Free remaining abort requests. */
			nvme_request_remove_child(parent, child);
			nvme_free_request(child);
		}
	}

	if (spdk_likely(!child_failed)) {
		/* There is no error so far. Abort requests were submitted successfully
		 * or there was no outstanding request to abort.
		 *
		 * Hence, abort the queued requests which have cmd_cb_arg as their
		 * callback context next.
		 */
		aborted = nvme_qpair_abort_queued_reqs_with_cbarg(qpair, cmd_cb_arg);
		if (parent->num_children == 0) {
			/* There was no outstanding request to abort. */
			if (aborted > 0) {
				/* The queued requests were successfully aborted. Hence
				 * complete the parent request with success synchronously.
				 */
				nvme_complete_request(parent->cb_fn, parent->cb_arg, parent->qpair,
						      parent, &parent->parent_status);
			} else {
				/* There was no queued request to abort. */
				rc = -ENOENT;
			}
		}
	} else {
		/* Failed to add or submit an abort request. */
		if (parent->num_children != 0) {
			/* Return success since we must wait for those children
			 * to complete, but set the parent request to failure.
			 */
			parent->parent_status.cdw0 |= 1U;
			rc = 0;
		}
	}

	if (rc != 0) {
		nvme_free_request(parent);
	}

	pthread_mutex_unlock(&ctrlr->ctrlr_lock);
	return rc;
}

int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr,
			 const struct spdk_nvme_fw_commit *fw_commit,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FIRMWARE_COMMIT;
	memcpy(&cmd->cdw10, fw_commit, sizeof(uint32_t));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

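/*
 * Firmware Image Download: CDW10 holds the transfer size as a 0's based
 * dword count and CDW11 holds the dword offset of this chunk within the
 * firmware image.
 */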
int
nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
				 uint32_t size, uint32_t offset, void *payload,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, size, cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD;
	cmd->cdw10 = spdk_nvme_bytes_to_numd(size);
	cmd->cdw11 = offset >> 2;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_security_receive(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
				     uint16_t spsp, uint8_t nssf, void *payload,
				     uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, false);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SECURITY_RECEIVE;
	cmd->cdw10_bits.sec_send_recv.nssf = nssf;
	cmd->cdw10_bits.sec_send_recv.spsp0 = (uint8_t)spsp;
	cmd->cdw10_bits.sec_send_recv.spsp1 = (uint8_t)(spsp >> 8);
	cmd->cdw10_bits.sec_send_recv.secp = secp;
	cmd->cdw11 = payload_size;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_security_send(struct spdk_nvme_ctrlr *ctrlr, uint8_t secp,
				  uint16_t spsp, uint8_t nssf, void *payload,
				  uint32_t payload_size, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, true);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SECURITY_SEND;
	cmd->cdw10_bits.sec_send_recv.nssf = nssf;
	cmd->cdw10_bits.sec_send_recv.spsp0 = (uint8_t)spsp;
	cmd->cdw10_bits.sec_send_recv.spsp1 = (uint8_t)(spsp >> 8);
	cmd->cdw10_bits.sec_send_recv.secp = secp;
	cmd->cdw11 = payload_size;

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
nvme_ctrlr_cmd_sanitize(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			struct spdk_nvme_sanitize *sanitize, uint32_t cdw11,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(ctrlr->adminq, cb_fn, cb_arg);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_SANITIZE;
	cmd->nsid = nsid;
	cmd->cdw11 = cdw11;
	memcpy(&cmd->cdw10, sanitize, sizeof(cmd->cdw10));

	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

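/*
 * Common helper for Directive Send/Receive.  CDW10 carries the payload size
 * as a 0's based dword count, CDW11 packs the directive operation, type and
 * specific value, and CDW12/13 are passed through from the caller.
 * host_to_ctrlr selects the data direction for the driver-owned bounce copy.
 */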
static int
nvme_ctrlr_cmd_directive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 uint32_t doper, uint32_t dtype, uint32_t dspec,
			 void *payload, uint32_t payload_size, uint32_t cdw12,
			 uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			 uint16_t opc_type, bool host_to_ctrlr)
{
	struct nvme_request *req = NULL;
	struct spdk_nvme_cmd *cmd = NULL;
	int rc;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_user_copy(ctrlr->adminq, payload, payload_size,
					      cb_fn, cb_arg, host_to_ctrlr);
	if (req == NULL) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -ENOMEM;
	}
	cmd = &req->cmd;
	cmd->opc = opc_type;
	cmd->nsid = nsid;

	if ((payload_size >> 2) > 0) {
		cmd->cdw10 = (payload_size >> 2) - 1;
	}
	cmd->cdw11_bits.directive.doper = doper;
	cmd->cdw11_bits.directive.dtype = dtype;
	cmd->cdw11_bits.directive.dspec = dspec;
	cmd->cdw12 = cdw12;
	cmd->cdw13 = cdw13;
	rc = nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

int
spdk_nvme_ctrlr_cmd_directive_send(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				   uint32_t doper, uint32_t dtype, uint32_t dspec,
				   void *payload, uint32_t payload_size, uint32_t cdw12,
				   uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_ctrlr_cmd_directive(ctrlr, nsid, doper, dtype, dspec,
					payload, payload_size, cdw12, cdw13, cb_fn, cb_arg,
					SPDK_NVME_OPC_DIRECTIVE_SEND, true);
}

int
spdk_nvme_ctrlr_cmd_directive_receive(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
				      uint32_t doper, uint32_t dtype, uint32_t dspec,
				      void *payload, uint32_t payload_size, uint32_t cdw12,
				      uint32_t cdw13, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_ctrlr_cmd_directive(ctrlr, nsid, doper, dtype, dspec,
					payload, payload_size, cdw12, cdw13, cb_fn, cb_arg,
					SPDK_NVME_OPC_DIRECTIVE_RECEIVE, false);
}