xref: /spdk/lib/nvme/nvme_ns_cmd.c (revision 60982c759db49b4f4579f16e3b24df0725ba4b94)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2015 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
5  *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
6  *   Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
7  */
8 
9 #include "nvme_internal.h"
10 
11 static inline struct nvme_request *_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns,
12 		struct spdk_nvme_qpair *qpair,
13 		const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
14 		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
15 		void *cb_arg, uint32_t opc, uint32_t io_flags,
16 		uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl, int *rc);
17 
18 static bool
19 nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_io,
20 			     uint32_t sectors_per_stripe, uint32_t qdepth)
21 {
22 	uint32_t child_per_io = UINT32_MAX;
23 
24 	/* After a namespace is destroyed (e.g. on hotplug), all of the fields associated
25 	 * with the namespace are cleared to zero. In that case this function returns
26 	 * true, and -EINVAL is returned to the caller.
27 	 */
28 	if (sectors_per_stripe > 0) {
29 		child_per_io = (lba_count + sectors_per_stripe - 1) / sectors_per_stripe;
30 	} else if (sectors_per_max_io > 0) {
31 		child_per_io = (lba_count + sectors_per_max_io - 1) / sectors_per_max_io;
32 	}
33 
34 	SPDK_DEBUGLOG(nvme, "checking maximum i/o length %u\n", child_per_io);
35 
36 	return child_per_io >= qdepth;
37 }
38 
39 static inline int
40 nvme_ns_map_failure_rc(uint32_t lba_count, uint32_t sectors_per_max_io,
41 		       uint32_t sectors_per_stripe, uint32_t qdepth, int rc)
42 {
43 	assert(rc);
44 	if (rc == -ENOMEM &&
45 	    nvme_ns_check_request_length(lba_count, sectors_per_max_io, sectors_per_stripe, qdepth)) {
46 		return -EINVAL;
47 	}
48 	return rc;
49 }
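
/*
 * Illustrative arithmetic for the two helpers above (the numbers are hypothetical):
 * with sectors_per_max_io = 256 and lba_count = 16384, a single call needs
 * ceil(16384 / 256) = 64 child requests.  If the qpair was created with
 * io_queue_requests = 64 or fewer, the I/O can never complete no matter how many
 * completions are reaped, so nvme_ns_map_failure_rc() turns the allocator's
 * -ENOMEM into -EINVAL instead of letting the caller retry indefinitely.
 */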
50 
51 static inline bool
52 _nvme_md_excluded_from_xfer(struct spdk_nvme_ns *ns, uint32_t io_flags)
53 {
54 	return (io_flags & SPDK_NVME_IO_FLAGS_PRACT) &&
55 	       (ns->flags & SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED) &&
56 	       (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) &&
57 	       (ns->md_size == 8);
58 }
59 
60 static inline uint32_t
61 _nvme_get_host_buffer_sector_size(struct spdk_nvme_ns *ns, uint32_t io_flags)
62 {
63 	return _nvme_md_excluded_from_xfer(ns, io_flags) ?
64 	       ns->sector_size : ns->extended_lba_size;
65 }
66 
67 static inline uint32_t
68 _nvme_get_sectors_per_max_io(struct spdk_nvme_ns *ns, uint32_t io_flags)
69 {
70 	return _nvme_md_excluded_from_xfer(ns, io_flags) ?
71 	       ns->sectors_per_max_io_no_md : ns->sectors_per_max_io;
72 }
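
/*
 * Example of the distinction above (hypothetical format): for a namespace formatted
 * with 512-byte data + 8-byte PI metadata in extended LBAs, an I/O issued with
 * SPDK_NVME_IO_FLAGS_PRACT lets the controller generate/strip the PI, so the host
 * buffer is sized using sector_size (512) and sectors_per_max_io_no_md rather than
 * extended_lba_size (520) and sectors_per_max_io.
 */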
73 
74 static struct nvme_request *
75 _nvme_add_child_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
76 			const struct nvme_payload *payload,
77 			uint32_t payload_offset, uint32_t md_offset,
78 			uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
79 			uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
80 			struct nvme_request *parent, bool check_sgl, int *rc)
81 {
82 	struct nvme_request	*child;
83 
84 	child = _nvme_ns_cmd_rw(ns, qpair, payload, payload_offset, md_offset, lba, lba_count, cb_fn,
85 				cb_arg, opc, io_flags, apptag_mask, apptag, cdw13, check_sgl, rc);
86 	if (child == NULL) {
87 		nvme_request_free_children(parent);
88 		nvme_free_request(parent);
89 		return NULL;
90 	}
91 
92 	nvme_request_add_child(parent, child);
93 	return child;
94 }
95 
96 static struct nvme_request *
97 _nvme_ns_cmd_split_request(struct spdk_nvme_ns *ns,
98 			   struct spdk_nvme_qpair *qpair,
99 			   const struct nvme_payload *payload,
100 			   uint32_t payload_offset, uint32_t md_offset,
101 			   uint64_t lba, uint32_t lba_count,
102 			   spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
103 			   uint32_t io_flags, struct nvme_request *req,
104 			   uint32_t sectors_per_max_io, uint32_t sector_mask,
105 			   uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, int *rc)
106 {
107 	uint32_t		sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
108 	uint32_t		remaining_lba_count = lba_count;
109 	struct nvme_request	*child;
110 
111 	while (remaining_lba_count > 0) {
112 		lba_count = sectors_per_max_io - (lba & sector_mask);
113 		lba_count = spdk_min(remaining_lba_count, lba_count);
114 
115 		child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
116 						lba, lba_count, cb_fn, cb_arg, opc,
117 						io_flags, apptag_mask, apptag, cdw13, req, true, rc);
118 		if (child == NULL) {
119 			return NULL;
120 		}
121 
122 		remaining_lba_count -= lba_count;
123 		lba += lba_count;
124 		payload_offset += lba_count * sector_size;
125 		md_offset += lba_count * ns->md_size;
126 	}
127 
128 	return req;
129 }
130 
131 static inline bool
132 _is_io_flags_valid(uint32_t io_flags)
133 {
134 	if (io_flags & ~SPDK_NVME_IO_FLAGS_VALID_MASK) {
135 		/* Invalid io_flags */
136 		SPDK_ERRLOG("Invalid io_flags 0x%x\n", io_flags);
137 		return false;
138 	}
139 
140 	return true;
141 }
142 
143 static void
144 _nvme_ns_cmd_setup_request(struct spdk_nvme_ns *ns, struct nvme_request *req,
145 			   uint32_t opc, uint64_t lba, uint32_t lba_count,
146 			   uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag,
147 			   uint32_t cdw13)
148 {
149 	struct spdk_nvme_cmd	*cmd;
150 
151 	assert(_is_io_flags_valid(io_flags));
152 
153 	cmd = &req->cmd;
154 	cmd->opc = opc;
155 	cmd->nsid = ns->id;
156 
157 	*(uint64_t *)&cmd->cdw10 = lba;
158 
159 	if (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) {
160 		switch (ns->pi_type) {
161 		case SPDK_NVME_FMT_NVM_PROTECTION_TYPE1:
162 		case SPDK_NVME_FMT_NVM_PROTECTION_TYPE2:
163 			cmd->cdw14 = (uint32_t)lba;
164 			break;
165 		}
166 	}
167 
168 	cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
169 
170 	cmd->cdw12 = lba_count - 1;
171 	cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);
172 
173 	cmd->cdw13 = cdw13;
174 
175 	cmd->cdw15 = apptag_mask;
176 	cmd->cdw15 = (cmd->cdw15 << 16 | apptag);
177 }
178 
179 static struct nvme_request *
180 _nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
181 			       struct spdk_nvme_qpair *qpair,
182 			       const struct nvme_payload *payload,
183 			       uint32_t payload_offset, uint32_t md_offset,
184 			       uint64_t lba, uint32_t lba_count,
185 			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
186 			       uint32_t io_flags, struct nvme_request *req,
187 			       uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, int *rc)
188 {
189 	spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
190 	spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
191 	void *sgl_cb_arg = req->payload.contig_or_cb_arg;
192 	bool start_valid, end_valid, last_sge, child_equals_parent;
193 	uint64_t child_lba = lba;
194 	uint32_t req_current_length = 0;
195 	uint32_t child_length = 0;
196 	uint32_t sge_length;
197 	uint32_t page_size = qpair->ctrlr->page_size;
198 	uintptr_t address;
199 
200 	reset_sgl_fn(sgl_cb_arg, payload_offset);
201 	next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
202 	while (req_current_length < req->payload_size) {
203 
204 		if (sge_length == 0) {
205 			continue;
206 		} else if (req_current_length + sge_length > req->payload_size) {
207 			sge_length = req->payload_size - req_current_length;
208 		}
209 
210 		/*
211 		 * The start of the SGE is invalid if the start address is not page aligned,
212 		 *  unless it is the first SGE in the child request.
213 		 */
214 		start_valid = child_length == 0 || _is_page_aligned(address, page_size);
215 
216 		/* Boolean for whether this is the last SGE in the parent request. */
217 		last_sge = (req_current_length + sge_length == req->payload_size);
218 
219 		/*
220 		 * The end of the SGE is invalid if the end address is not page aligned,
221 		 *  unless it is the last SGE in the parent request.
222 		 */
223 		end_valid = last_sge || _is_page_aligned(address + sge_length, page_size);
224 
225 		/*
226 		 * If this child request would equal the parent request, then no splitting
227 		 *  is required for the parent request (the one passed into this function).
228 		 *  In that case, we do not create a child request at all - we just send
229 		 *  the original request as a single request at the end of this function.
230 		 */
231 		child_equals_parent = (child_length + sge_length == req->payload_size);
232 
233 		if (start_valid) {
234 			/*
235 			 * The start of the SGE is valid, so advance the length parameters,
236 			 *  to include this SGE with previous SGEs for this child request
237 			 *  (if any).  If it is not valid, we do not advance the length
238 			 *  parameters nor get the next SGE, because we must send what has
239 			 *  been collected before this SGE as a child request.
240 			 */
241 			child_length += sge_length;
242 			req_current_length += sge_length;
243 			if (req_current_length < req->payload_size) {
244 				next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
245 				/*
246 				 * If the next SGE is not page aligned, we will need to create a
247 				 *  child request for what we have so far, and then start a new
248 				 *  child request for the next SGE.
249 				 */
250 				start_valid = _is_page_aligned(address, page_size);
251 			}
252 		}
253 
254 		if (start_valid && end_valid && !last_sge) {
255 			continue;
256 		}
257 
258 		/*
259 		 * We need to create a split here.  Send what we have accumulated so far as a child
260 		 *  request.  Checking if child_equals_parent allows us to *not* create a child request
261 		 *  when no splitting is required - in that case we will fall-through and just create
262 		 *  a single request with no children for the entire I/O.
263 		 */
264 		if (!child_equals_parent) {
265 			struct nvme_request *child;
266 			uint32_t child_lba_count;
267 
268 			if ((child_length % ns->extended_lba_size) != 0) {
269 				SPDK_ERRLOG("child_length %u not even multiple of lba_size %u\n",
270 					    child_length, ns->extended_lba_size);
271 				*rc = -EINVAL;
272 				return NULL;
273 			}
274 			child_lba_count = child_length / ns->extended_lba_size;
275 			/*
276 			 * Note the last parameter is set to "false" - this tells the recursive
277 			 *  call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
278 			 *  since we have already verified it here.
279 			 */
280 			child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
281 							child_lba, child_lba_count,
282 							cb_fn, cb_arg, opc, io_flags,
283 							apptag_mask, apptag, cdw13, req, false, rc);
284 			if (child == NULL) {
285 				return NULL;
286 			}
287 			payload_offset += child_length;
288 			md_offset += child_lba_count * ns->md_size;
289 			child_lba += child_lba_count;
290 			child_length = 0;
291 		}
292 	}
293 
294 	if (child_length == req->payload_size) {
295 		/* No splitting was required, so set up the whole payload as one request. */
296 		_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
297 	}
298 
299 	return req;
300 }
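
/*
 * Worked example for the PRP-driven split above (all numbers hypothetical):
 * with page_size = 4096 and extended_lba_size = 512, consider a payload of three
 * SGEs: A = 4096 bytes (page-aligned start and end), B = 2048 bytes (aligned start,
 * unaligned end) and C = 2048 bytes starting at an unaligned address.  A and B can
 * be described by one command, but C's unaligned start cannot follow B's unaligned
 * end within a single PRP list, so the 16-block parent is split into a 12-block
 * child (A + B) and a 4-block child (C).
 */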
301 
302 static struct nvme_request *
303 _nvme_ns_cmd_split_request_sgl(struct spdk_nvme_ns *ns,
304 			       struct spdk_nvme_qpair *qpair,
305 			       const struct nvme_payload *payload,
306 			       uint32_t payload_offset, uint32_t md_offset,
307 			       uint64_t lba, uint32_t lba_count,
308 			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
309 			       uint32_t io_flags, struct nvme_request *req,
310 			       uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, int *rc)
311 {
312 	spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
313 	spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
314 	void *sgl_cb_arg = req->payload.contig_or_cb_arg;
315 	uint64_t child_lba = lba;
316 	uint32_t req_current_length = 0;
317 	uint32_t child_length = 0;
318 	uint32_t sge_length;
319 	uint16_t max_sges, num_sges;
320 	uintptr_t address;
321 
322 	max_sges = ns->ctrlr->max_sges;
323 
324 	reset_sgl_fn(sgl_cb_arg, payload_offset);
325 	num_sges = 0;
326 
327 	while (req_current_length < req->payload_size) {
328 		next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
329 
330 		if (req_current_length + sge_length > req->payload_size) {
331 			sge_length = req->payload_size - req_current_length;
332 		}
333 
334 		child_length += sge_length;
335 		req_current_length += sge_length;
336 		num_sges++;
337 
338 		if (num_sges < max_sges && req_current_length < req->payload_size) {
339 			continue;
340 		}
341 
342 		/*
343 		 * We need to create a split here.  Send what we have accumulated so far as a child
344 		 *  request.  Checking if the child equals the full payload allows us to *not*
345 		 *  create a child request when no splitting is required - in that case we will
346 		 *  fall-through and just create a single request with no children for the entire I/O.
347 		 */
348 		if (child_length != req->payload_size) {
349 			struct nvme_request *child;
350 			uint32_t child_lba_count;
351 
352 			if ((child_length % ns->extended_lba_size) != 0) {
353 				SPDK_ERRLOG("child_length %u not even multiple of lba_size %u\n",
354 					    child_length, ns->extended_lba_size);
355 				*rc = -EINVAL;
356 				return NULL;
357 			}
358 			child_lba_count = child_length / ns->extended_lba_size;
359 			/*
360 			 * Note the last parameter is set to "false" - this tells the recursive
361 			 *  call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
362 			 *  since we have already verified it here.
363 			 */
364 			child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
365 							child_lba, child_lba_count,
366 							cb_fn, cb_arg, opc, io_flags,
367 							apptag_mask, apptag, cdw13, req, false, rc);
368 			if (child == NULL) {
369 				return NULL;
370 			}
371 			payload_offset += child_length;
372 			md_offset += child_lba_count * ns->md_size;
373 			child_lba += child_lba_count;
374 			child_length = 0;
375 			num_sges = 0;
376 		}
377 	}
378 
379 	if (child_length == req->payload_size) {
380 		/* No splitting was required, so set up the whole payload as one request. */
381 		_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
382 	}
383 
384 	return req;
385 }
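
/*
 * Worked example for the SGL-driven split above (hypothetical numbers): with
 * max_sges = 16 and a payload described by 40 SGEs, each covering a whole number
 * of extended blocks, the request is split into children of 16, 16 and 8 SGEs.
 * If an accumulated child length were not a multiple of extended_lba_size, the
 * split would instead fail with -EINVAL.
 */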
386 
387 static inline struct nvme_request *
388 _nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
389 		const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
390 		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
391 		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl, int *rc)
392 {
393 	struct nvme_request	*req;
394 	uint32_t		sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
395 	uint32_t		sectors_per_max_io = _nvme_get_sectors_per_max_io(ns, io_flags);
396 	uint32_t		sectors_per_stripe = ns->sectors_per_stripe;
397 
398 	assert(rc != NULL);
399 	assert(*rc == 0);
400 
401 	req = nvme_allocate_request(qpair, payload, lba_count * sector_size, lba_count * ns->md_size,
402 				    cb_fn, cb_arg);
403 	if (req == NULL) {
404 		*rc = -ENOMEM;
405 		return NULL;
406 	}
407 
408 	req->payload_offset = payload_offset;
409 	req->md_offset = md_offset;
410 
411 	/* Zone append commands cannot be split. */
412 	if (opc == SPDK_NVME_OPC_ZONE_APPEND) {
413 		assert(ns->csi == SPDK_NVME_CSI_ZNS);
414 		/*
415 		 * As long as we disable driver-assisted striping for Zone Append commands,
416 		 * _nvme_ns_cmd_rw() should never cause a valid request to be split.
417 		 * If a request does end up being split, the error is handled by the callers.
418 		 */
419 		sectors_per_stripe = 0;
420 	}
421 
422 	/*
423 	 * Intel DC P3*00 NVMe controllers benefit from driver-assisted striping.
424 	 * If this controller defines a stripe boundary and this I/O spans a stripe
425 	 *  boundary, split the request into multiple requests and submit each
426 	 *  separately to hardware.
427 	 */
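	/*
	 * Worked example with hypothetical numbers: if sectors_per_stripe = 128,
	 * lba = 120 and lba_count = 32, then (120 & 127) + 32 = 152 > 128, so the I/O
	 * crosses a stripe boundary and _nvme_ns_cmd_split_request() produces children
	 * of 8 and 24 blocks (sector_mask = 127 makes each child end on a stripe
	 * boundary).
	 */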
428 	if (sectors_per_stripe > 0 &&
429 	    (((lba & (sectors_per_stripe - 1)) + lba_count) > sectors_per_stripe)) {
430 
431 		return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
432 						  cb_fn,
433 						  cb_arg, opc,
434 						  io_flags, req, sectors_per_stripe, sectors_per_stripe - 1, apptag_mask, apptag, cdw13, rc);
435 	} else if (lba_count > sectors_per_max_io) {
436 		return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
437 						  cb_fn,
438 						  cb_arg, opc,
439 						  io_flags, req, sectors_per_max_io, 0, apptag_mask, apptag, cdw13, rc);
440 	} else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL && check_sgl) {
441 		if (ns->ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
442 			return _nvme_ns_cmd_split_request_sgl(ns, qpair, payload, payload_offset, md_offset,
443 							      lba, lba_count, cb_fn, cb_arg, opc, io_flags,
444 							      req, apptag_mask, apptag, cdw13, rc);
445 		} else {
446 			return _nvme_ns_cmd_split_request_prp(ns, qpair, payload, payload_offset, md_offset,
447 							      lba, lba_count, cb_fn, cb_arg, opc, io_flags,
448 							      req, apptag_mask, apptag, cdw13, rc);
449 		}
450 	}
451 
452 	_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
453 	return req;
454 }
455 
456 int
457 spdk_nvme_ns_cmd_compare(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
458 			 uint64_t lba,
459 			 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
460 			 uint32_t io_flags)
461 {
462 	struct nvme_request *req;
463 	struct nvme_payload payload;
464 	int rc = 0;
465 
466 	if (!_is_io_flags_valid(io_flags)) {
467 		return -EINVAL;
468 	}
469 
470 	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
471 
472 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
473 			      SPDK_NVME_OPC_COMPARE,
474 			      io_flags, 0,
475 			      0, 0, false, &rc);
476 	if (req != NULL) {
477 		return nvme_qpair_submit_request(qpair, req);
478 	} else {
479 		return nvme_ns_map_failure_rc(lba_count,
480 					      ns->sectors_per_max_io,
481 					      ns->sectors_per_stripe,
482 					      qpair->ctrlr->opts.io_queue_requests,
483 					      rc);
484 	}
485 }
486 
487 int
488 spdk_nvme_ns_cmd_compare_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
489 				 void *buffer,
490 				 void *metadata,
491 				 uint64_t lba,
492 				 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
493 				 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
494 {
495 	struct nvme_request *req;
496 	struct nvme_payload payload;
497 	int rc = 0;
498 
499 	if (!_is_io_flags_valid(io_flags)) {
500 		return -EINVAL;
501 	}
502 
503 	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
504 
505 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
506 			      SPDK_NVME_OPC_COMPARE,
507 			      io_flags,
508 			      apptag_mask, apptag, 0, false, &rc);
509 	if (req != NULL) {
510 		return nvme_qpair_submit_request(qpair, req);
511 	} else {
512 		return nvme_ns_map_failure_rc(lba_count,
513 					      ns->sectors_per_max_io,
514 					      ns->sectors_per_stripe,
515 					      qpair->ctrlr->opts.io_queue_requests,
516 					      rc);
517 	}
518 }
519 
520 int
521 spdk_nvme_ns_cmd_comparev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
522 			  uint64_t lba, uint32_t lba_count,
523 			  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
524 			  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
525 			  spdk_nvme_req_next_sge_cb next_sge_fn)
526 {
527 	struct nvme_request *req;
528 	struct nvme_payload payload;
529 	int rc = 0;
530 
531 	if (!_is_io_flags_valid(io_flags)) {
532 		return -EINVAL;
533 	}
534 
535 	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
536 		return -EINVAL;
537 	}
538 
539 	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
540 
541 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
542 			      SPDK_NVME_OPC_COMPARE,
543 			      io_flags, 0, 0, 0, true, &rc);
544 	if (req != NULL) {
545 		return nvme_qpair_submit_request(qpair, req);
546 	} else {
547 		return nvme_ns_map_failure_rc(lba_count,
548 					      ns->sectors_per_max_io,
549 					      ns->sectors_per_stripe,
550 					      qpair->ctrlr->opts.io_queue_requests,
551 					      rc);
552 	}
553 }
554 
555 int
556 spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
557 				  uint64_t lba, uint32_t lba_count,
558 				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
559 				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
560 				  spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
561 				  uint16_t apptag_mask, uint16_t apptag)
562 {
563 	struct nvme_request *req;
564 	struct nvme_payload payload;
565 	int rc = 0;
566 
567 	if (!_is_io_flags_valid(io_flags)) {
568 		return -EINVAL;
569 	}
570 
571 	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
572 		return -EINVAL;
573 	}
574 
575 	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
576 
577 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
578 			      SPDK_NVME_OPC_COMPARE, io_flags, apptag_mask, apptag, 0, true, &rc);
579 	if (req != NULL) {
580 		return nvme_qpair_submit_request(qpair, req);
581 	} else {
582 		return nvme_ns_map_failure_rc(lba_count,
583 					      ns->sectors_per_max_io,
584 					      ns->sectors_per_stripe,
585 					      qpair->ctrlr->opts.io_queue_requests,
586 					      rc);
587 	}
588 }
589 
590 int
591 spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
592 		      uint64_t lba,
593 		      uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
594 		      uint32_t io_flags)
595 {
596 	struct nvme_request *req;
597 	struct nvme_payload payload;
598 	int rc = 0;
599 
600 	if (!_is_io_flags_valid(io_flags)) {
601 		return -EINVAL;
602 	}
603 
604 	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
605 
606 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
607 			      io_flags, 0,
608 			      0, 0, false, &rc);
609 	if (req != NULL) {
610 		return nvme_qpair_submit_request(qpair, req);
611 	} else {
612 		return nvme_ns_map_failure_rc(lba_count,
613 					      ns->sectors_per_max_io,
614 					      ns->sectors_per_stripe,
615 					      qpair->ctrlr->opts.io_queue_requests,
616 					      rc);
617 	}
618 }
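
/*
 * Illustrative usage sketch for spdk_nvme_ns_cmd_read() (not compiled as part of
 * this file; g_ns and g_qpair are placeholders for an already attached namespace
 * and an allocated I/O qpair):
 *
 *	static void
 *	read_done(void *cb_arg, const struct spdk_nvme_cpl *cpl)
 *	{
 *		if (spdk_nvme_cpl_is_error(cpl)) {
 *			SPDK_ERRLOG("read failed: %s\n",
 *				    spdk_nvme_cpl_get_status_string(&cpl->status));
 *		}
 *		*(bool *)cb_arg = true;
 *	}
 *
 *	bool done = false;
 *	uint32_t sector_size = spdk_nvme_ns_get_sector_size(g_ns);
 *	void *buf = spdk_zmalloc(8 * sector_size, sector_size, NULL,
 *				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
 *	int rc = spdk_nvme_ns_cmd_read(g_ns, g_qpair, buf, 0, 8, read_done, &done, 0);
 *
 *	while (rc == 0 && !done) {
 *		spdk_nvme_qpair_process_completions(g_qpair, 0);
 *	}
 *	spdk_free(buf);
 */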
619 
620 int
621 spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
622 			      void *metadata,
623 			      uint64_t lba,
624 			      uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
625 			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
626 {
627 	struct nvme_request *req;
628 	struct nvme_payload payload;
629 	int rc = 0;
630 
631 	if (!_is_io_flags_valid(io_flags)) {
632 		return -EINVAL;
633 	}
634 
635 	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
636 
637 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
638 			      io_flags,
639 			      apptag_mask, apptag, 0, false, &rc);
640 	if (req != NULL) {
641 		return nvme_qpair_submit_request(qpair, req);
642 	} else {
643 		return nvme_ns_map_failure_rc(lba_count,
644 					      ns->sectors_per_max_io,
645 					      ns->sectors_per_stripe,
646 					      qpair->ctrlr->opts.io_queue_requests,
647 					      rc);
648 	}
649 }
650 
651 int
652 spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
653 		       uint64_t lba, uint32_t lba_count,
654 		       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
655 		       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
656 		       spdk_nvme_req_next_sge_cb next_sge_fn)
657 {
658 	struct nvme_request *req;
659 	struct nvme_payload payload;
660 	int rc = 0;
661 
662 	if (!_is_io_flags_valid(io_flags)) {
663 		return -EINVAL;
664 	}
665 
666 	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
667 		return -EINVAL;
668 	}
669 
670 	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
671 
672 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
673 			      io_flags, 0, 0, 0, true, &rc);
674 	if (req != NULL) {
675 		return nvme_qpair_submit_request(qpair, req);
676 	} else {
677 		return nvme_ns_map_failure_rc(lba_count,
678 					      ns->sectors_per_max_io,
679 					      ns->sectors_per_stripe,
680 					      qpair->ctrlr->opts.io_queue_requests,
681 					      rc);
682 	}
683 }
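
/*
 * Sketch of the reset_sgl/next_sge callback pair used by the *v variants in this
 * file (illustrative only; the iovec-based context is an assumption, not an SPDK
 * requirement).  Note that for these non-_ext variants cb_arg is passed both to
 * the completion callback and to the SGL callbacks:
 *
 *	struct sgl_ctx {
 *		struct iovec	*iov;
 *		int		iovcnt;
 *		int		idx;
 *		size_t		offset;
 *	};
 *
 *	static void
 *	reset_sgl(void *ref, uint32_t sgl_offset)
 *	{
 *		struct sgl_ctx *ctx = ref;
 *
 *		ctx->idx = 0;
 *		ctx->offset = sgl_offset;
 *		while (ctx->idx < ctx->iovcnt && ctx->offset >= ctx->iov[ctx->idx].iov_len) {
 *			ctx->offset -= ctx->iov[ctx->idx++].iov_len;
 *		}
 *	}
 *
 *	static int
 *	next_sge(void *ref, void **address, uint32_t *length)
 *	{
 *		struct sgl_ctx *ctx = ref;
 *		struct iovec *iov = &ctx->iov[ctx->idx++];
 *
 *		*address = (uint8_t *)iov->iov_base + ctx->offset;
 *		*length = iov->iov_len - ctx->offset;
 *		ctx->offset = 0;
 *		return 0;
 *	}
 */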
684 
685 int
686 spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
687 			       uint64_t lba, uint32_t lba_count,
688 			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
689 			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
690 			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
691 			       uint16_t apptag_mask, uint16_t apptag)
692 {
693 	struct nvme_request *req;
694 	struct nvme_payload payload;
695 	int rc = 0;
696 
697 	if (!_is_io_flags_valid(io_flags)) {
698 		return -EINVAL;
699 	}
700 
701 	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
702 		return -EINVAL;
703 	}
704 
705 	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
706 
707 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
708 			      io_flags, apptag_mask, apptag, 0, true, &rc);
709 	if (req != NULL) {
710 		return nvme_qpair_submit_request(qpair, req);
711 	} else {
712 		return nvme_ns_map_failure_rc(lba_count,
713 					      ns->sectors_per_max_io,
714 					      ns->sectors_per_stripe,
715 					      qpair->ctrlr->opts.io_queue_requests,
716 					      rc);
717 	}
718 }
719 
720 int
721 spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
722 			   uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
723 			   void *cb_arg, spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
724 			   spdk_nvme_req_next_sge_cb next_sge_fn,
725 			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
726 {
727 	struct nvme_request *req;
728 	struct nvme_payload payload;
729 	int rc = 0;
730 
731 	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
732 		return -EINVAL;
733 	}
734 
735 	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
736 
737 	if (opts) {
738 		if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
739 			return -EINVAL;
740 		}
741 
742 		payload.opts = opts;
743 		payload.md = opts->metadata;
744 		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
745 				      opts->io_flags, opts->apptag_mask, opts->apptag, opts->cdw13, true, &rc);
746 
747 	} else {
748 		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
749 				      0, 0, 0, 0, true, &rc);
750 	}
751 
752 	if (req != NULL) {
753 		return nvme_qpair_submit_request(qpair, req);
754 	} else {
755 		return nvme_ns_map_failure_rc(lba_count,
756 					      ns->sectors_per_max_io,
757 					      ns->sectors_per_stripe,
758 					      qpair->ctrlr->opts.io_queue_requests,
759 					      rc);
760 	}
761 }
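
/*
 * Minimal sketch of driving the _ext variants above and below (md_buf, read_done,
 * reset_sgl, next_sge and ctx are hypothetical; field values depend on the
 * namespace format):
 *
 *	struct spdk_nvme_ns_cmd_ext_io_opts ext_opts = {
 *		.size = sizeof(ext_opts),
 *		.io_flags = SPDK_NVME_IO_FLAGS_PRCHK_GUARD,
 *		.metadata = md_buf,
 *		.apptag_mask = 0,
 *		.apptag = 0,
 *	};
 *
 *	rc = spdk_nvme_ns_cmd_readv_ext(ns, qpair, lba, lba_count, read_done, &ctx,
 *					reset_sgl, next_sge, &ext_opts);
 *
 * When opts is NULL, the request is issued with io_flags = 0 and without a
 * separate metadata buffer.
 */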
762 
763 int
764 spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
765 		       void *buffer, uint64_t lba,
766 		       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
767 		       uint32_t io_flags)
768 {
769 	struct nvme_request *req;
770 	struct nvme_payload payload;
771 	int rc = 0;
772 
773 	if (!_is_io_flags_valid(io_flags)) {
774 		return -EINVAL;
775 	}
776 
777 	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
778 
779 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
780 			      io_flags, 0, 0, 0, false, &rc);
781 	if (req != NULL) {
782 		return nvme_qpair_submit_request(qpair, req);
783 	} else {
784 		return nvme_ns_map_failure_rc(lba_count,
785 					      ns->sectors_per_max_io,
786 					      ns->sectors_per_stripe,
787 					      qpair->ctrlr->opts.io_queue_requests,
788 					      rc);
789 	}
790 }
791 
792 static int
793 nvme_ns_cmd_check_zone_append(struct spdk_nvme_ns *ns, uint32_t lba_count, uint32_t io_flags)
794 {
795 	uint32_t sector_size;
796 
797 	/* Not all NVMe Zoned Namespaces support the zone append command. */
798 	if (!(ns->ctrlr->flags & SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED)) {
799 		return -EINVAL;
800 	}
801 
802 	sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
803 
804 	/* Reject an oversized zone append command early. */
805 	if (lba_count * sector_size > ns->ctrlr->max_zone_append_size) {
806 		return -EINVAL;
807 	}
808 
809 	return 0;
810 }
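
/*
 * For example (hypothetical numbers): with 4 KiB logical blocks and
 * max_zone_append_size = 1 MiB, any append longer than 256 blocks is rejected by
 * the check above instead of being split into children later on.
 */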
811 
812 int
813 nvme_ns_cmd_zone_append_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
814 				void *buffer, void *metadata, uint64_t zslba,
815 				uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
816 				uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
817 {
818 	struct nvme_request *req;
819 	struct nvme_payload payload;
820 	int rc = 0;
821 
822 	if (!_is_io_flags_valid(io_flags)) {
823 		return -EINVAL;
824 	}
825 
826 	rc = nvme_ns_cmd_check_zone_append(ns, lba_count, io_flags);
827 	if (rc) {
828 		return rc;
829 	}
830 
831 	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
832 
833 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
834 			      SPDK_NVME_OPC_ZONE_APPEND,
835 			      io_flags, apptag_mask, apptag, 0, false, &rc);
836 	if (req != NULL) {
837 		/*
838 		 * Zone append commands cannot be split (num_children has to be 0).
839 		 * For NVME_PAYLOAD_TYPE_CONTIG, _nvme_ns_cmd_rw() should never cause a split,
840 		 * because an oversized request would already have been rejected by
841 		 * nvme_ns_cmd_check_zone_append(), since zasl <= mdts.
842 		 */
843 		assert(req->num_children == 0);
844 		if (req->num_children) {
845 			nvme_request_free_children(req);
846 			nvme_free_request(req);
847 			return -EINVAL;
848 		}
849 		return nvme_qpair_submit_request(qpair, req);
850 	} else {
851 		return nvme_ns_map_failure_rc(lba_count,
852 					      ns->sectors_per_max_io,
853 					      ns->sectors_per_stripe,
854 					      qpair->ctrlr->opts.io_queue_requests,
855 					      rc);
856 	}
857 }
858 
859 int
860 nvme_ns_cmd_zone_appendv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
861 				 uint64_t zslba, uint32_t lba_count,
862 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
863 				 spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
864 				 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
865 				 uint16_t apptag_mask, uint16_t apptag)
866 {
867 	struct nvme_request *req;
868 	struct nvme_payload payload;
869 	int rc = 0;
870 
871 	if (!_is_io_flags_valid(io_flags)) {
872 		return -EINVAL;
873 	}
874 
875 	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
876 		return -EINVAL;
877 	}
878 
879 	rc = nvme_ns_cmd_check_zone_append(ns, lba_count, io_flags);
880 	if (rc) {
881 		return rc;
882 	}
883 
884 	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
885 
886 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
887 			      SPDK_NVME_OPC_ZONE_APPEND,
888 			      io_flags, apptag_mask, apptag, 0, true, &rc);
889 	if (req != NULL) {
890 		/*
891 		 * Zone append commands cannot be split (num_children has to be 0).
892 		 * For NVME_PAYLOAD_TYPE_SGL, _nvme_ns_cmd_rw() can cause a split.
893 		 * However, _nvme_ns_cmd_split_request_sgl() and _nvme_ns_cmd_split_request_prp()
894 		 * do not always cause a request to be split. These functions verify the payload
895 		 * size, that the number of SGEs does not exceed max_sges, and the SGE alignment
896 		 * rules (in the case of PRPs). If any of these checks fail, they split the request.
897 		 * In our case a split is very unlikely, since we already verified the size with
898 		 * nvme_ns_cmd_check_zone_append(); however, we still need to call these functions
899 		 * to perform the verification. If they do cause a split, we return an error here.
900 		 * For valid requests, these functions never cause a split.
901 		 */
902 		if (req->num_children) {
903 			nvme_request_free_children(req);
904 			nvme_free_request(req);
905 			return -EINVAL;
906 		}
907 		return nvme_qpair_submit_request(qpair, req);
908 	} else {
909 		return nvme_ns_map_failure_rc(lba_count,
910 					      ns->sectors_per_max_io,
911 					      ns->sectors_per_stripe,
912 					      qpair->ctrlr->opts.io_queue_requests,
913 					      rc);
914 	}
915 }
916 
917 int
918 spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
919 			       void *buffer, void *metadata, uint64_t lba,
920 			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
921 			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
922 {
923 	struct nvme_request *req;
924 	struct nvme_payload payload;
925 	int rc = 0;
926 
927 	if (!_is_io_flags_valid(io_flags)) {
928 		return -EINVAL;
929 	}
930 
931 	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
932 
933 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
934 			      io_flags, apptag_mask, apptag, 0, false, &rc);
935 	if (req != NULL) {
936 		return nvme_qpair_submit_request(qpair, req);
937 	} else {
938 		return nvme_ns_map_failure_rc(lba_count,
939 					      ns->sectors_per_max_io,
940 					      ns->sectors_per_stripe,
941 					      qpair->ctrlr->opts.io_queue_requests,
942 					      rc);
943 	}
944 }
945 
946 int
947 spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
948 			uint64_t lba, uint32_t lba_count,
949 			spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
950 			spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
951 			spdk_nvme_req_next_sge_cb next_sge_fn)
952 {
953 	struct nvme_request *req;
954 	struct nvme_payload payload;
955 	int rc = 0;
956 
957 	if (!_is_io_flags_valid(io_flags)) {
958 		return -EINVAL;
959 	}
960 
961 	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
962 		return -EINVAL;
963 	}
964 
965 	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
966 
967 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
968 			      io_flags, 0, 0, 0, true, &rc);
969 	if (req != NULL) {
970 		return nvme_qpair_submit_request(qpair, req);
971 	} else {
972 		return nvme_ns_map_failure_rc(lba_count,
973 					      ns->sectors_per_max_io,
974 					      ns->sectors_per_stripe,
975 					      qpair->ctrlr->opts.io_queue_requests,
976 					      rc);
977 	}
978 }
979 
980 int
981 spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
982 				uint64_t lba, uint32_t lba_count,
983 				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
984 				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
985 				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
986 				uint16_t apptag_mask, uint16_t apptag)
987 {
988 	struct nvme_request *req;
989 	struct nvme_payload payload;
990 	int rc = 0;
991 
992 	if (!_is_io_flags_valid(io_flags)) {
993 		return -EINVAL;
994 	}
995 
996 	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
997 		return -EINVAL;
998 	}
999 
1000 	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
1001 
1002 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
1003 			      io_flags, apptag_mask, apptag, 0, true, &rc);
1004 	if (req != NULL) {
1005 		return nvme_qpair_submit_request(qpair, req);
1006 	} else {
1007 		return nvme_ns_map_failure_rc(lba_count,
1008 					      ns->sectors_per_max_io,
1009 					      ns->sectors_per_stripe,
1010 					      qpair->ctrlr->opts.io_queue_requests,
1011 					      rc);
1012 	}
1013 }
1014 
1015 int
1016 spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t lba,
1017 			    uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1018 			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1019 			    spdk_nvme_req_next_sge_cb next_sge_fn,
1020 			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
1021 {
1022 	struct nvme_request *req;
1023 	struct nvme_payload payload;
1024 	int rc = 0;
1025 
1026 	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
1027 		return -EINVAL;
1028 	}
1029 
1030 	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
1031 
1032 	if (opts) {
1033 		if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
1034 			return -EINVAL;
1035 		}
1036 
1037 		payload.opts = opts;
1038 		payload.md = opts->metadata;
1039 		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
1040 				      opts->io_flags, opts->apptag_mask, opts->apptag, opts->cdw13, true, &rc);
1041 
1042 	} else {
1043 		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
1044 				      0, 0, 0, 0, true, &rc);
1045 	}
1046 
1047 	if (req != NULL) {
1048 		return nvme_qpair_submit_request(qpair, req);
1049 	} else {
1050 		return nvme_ns_map_failure_rc(lba_count,
1051 					      ns->sectors_per_max_io,
1052 					      ns->sectors_per_stripe,
1053 					      qpair->ctrlr->opts.io_queue_requests,
1054 					      rc);
1055 	}
1056 }
1057 
1058 int
1059 spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1060 			      uint64_t lba, uint32_t lba_count,
1061 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1062 			      uint32_t io_flags)
1063 {
1064 	struct nvme_request	*req;
1065 	struct spdk_nvme_cmd	*cmd;
1066 	uint64_t		*tmp_lba;
1067 
1068 	if (!_is_io_flags_valid(io_flags)) {
1069 		return -EINVAL;
1070 	}
1071 
1072 	if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
1073 		return -EINVAL;
1074 	}
1075 
1076 	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
1077 	if (req == NULL) {
1078 		return -ENOMEM;
1079 	}
1080 
1081 	cmd = &req->cmd;
1082 	cmd->opc = SPDK_NVME_OPC_WRITE_ZEROES;
1083 	cmd->nsid = ns->id;
1084 
1085 	tmp_lba = (uint64_t *)&cmd->cdw10;
1086 	*tmp_lba = lba;
1087 	cmd->cdw12 = lba_count - 1;
1088 	cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
1089 	cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);
1090 
1091 	return nvme_qpair_submit_request(qpair, req);
1092 }
1093 
1094 int
1095 spdk_nvme_ns_cmd_verify(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1096 			uint64_t lba, uint32_t lba_count,
1097 			spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1098 			uint32_t io_flags)
1099 {
1100 	struct nvme_request	*req;
1101 	struct spdk_nvme_cmd	*cmd;
1102 
1103 	if (!_is_io_flags_valid(io_flags)) {
1104 		return -EINVAL;
1105 	}
1106 
1107 	if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
1108 		return -EINVAL;
1109 	}
1110 
1111 	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
1112 	if (req == NULL) {
1113 		return -ENOMEM;
1114 	}
1115 
1116 	cmd = &req->cmd;
1117 	cmd->opc = SPDK_NVME_OPC_VERIFY;
1118 	cmd->nsid = ns->id;
1119 
1120 	*(uint64_t *)&cmd->cdw10 = lba;
1121 	cmd->cdw12 = lba_count - 1;
1122 	cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
1123 	cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);
1124 
1125 	return nvme_qpair_submit_request(qpair, req);
1126 }
1127 
1128 int
1129 spdk_nvme_ns_cmd_write_uncorrectable(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1130 				     uint64_t lba, uint32_t lba_count,
1131 				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1132 {
1133 	struct nvme_request	*req;
1134 	struct spdk_nvme_cmd	*cmd;
1135 	uint64_t		*tmp_lba;
1136 
1137 	if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
1138 		return -EINVAL;
1139 	}
1140 
1141 	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
1142 	if (req == NULL) {
1143 		return -ENOMEM;
1144 	}
1145 
1146 	cmd = &req->cmd;
1147 	cmd->opc = SPDK_NVME_OPC_WRITE_UNCORRECTABLE;
1148 	cmd->nsid = ns->id;
1149 
1150 	tmp_lba = (uint64_t *)&cmd->cdw10;
1151 	*tmp_lba = lba;
1152 	cmd->cdw12 = lba_count - 1;
1153 
1154 	return nvme_qpair_submit_request(qpair, req);
1155 }
1156 
1157 int
1158 spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1159 				    uint32_t type,
1160 				    const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
1161 				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1162 {
1163 	struct nvme_request	*req;
1164 	struct spdk_nvme_cmd	*cmd;
1165 
1166 	if (num_ranges == 0 || num_ranges > SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES) {
1167 		return -EINVAL;
1168 	}
1169 
1170 	if (ranges == NULL) {
1171 		return -EINVAL;
1172 	}
1173 
1174 	req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
1175 					      num_ranges * sizeof(struct spdk_nvme_dsm_range),
1176 					      cb_fn, cb_arg, true);
1177 	if (req == NULL) {
1178 		return -ENOMEM;
1179 	}
1180 
1181 	cmd = &req->cmd;
1182 	cmd->opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
1183 	cmd->nsid = ns->id;
1184 
1185 	cmd->cdw10_bits.dsm.nr = num_ranges - 1;
1186 	cmd->cdw11 = type;
1187 
1188 	return nvme_qpair_submit_request(qpair, req);
1189 }
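
/*
 * Illustrative deallocate (TRIM) sketch for the function above (range values and
 * dsm_done are hypothetical; length is in logical blocks):
 *
 *	struct spdk_nvme_dsm_range range = {
 *		.starting_lba = 0,
 *		.length = 8192,
 *	};
 *
 *	rc = spdk_nvme_ns_cmd_dataset_management(ns, qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
 *						 &range, 1, dsm_done, NULL);
 */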
1190 
1191 int
1192 spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1193 		      const struct spdk_nvme_scc_source_range *ranges,
1194 		      uint16_t num_ranges, uint64_t dest_lba,
1195 		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1196 {
1197 	struct nvme_request	*req;
1198 	struct spdk_nvme_cmd	*cmd;
1199 
1200 	if (num_ranges == 0) {
1201 		return -EINVAL;
1202 	}
1203 
1204 	if (ranges == NULL) {
1205 		return -EINVAL;
1206 	}
1207 
1208 	req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
1209 					      num_ranges * sizeof(struct spdk_nvme_scc_source_range),
1210 					      cb_fn, cb_arg, true);
1211 	if (req == NULL) {
1212 		return -ENOMEM;
1213 	}
1214 
1215 	cmd = &req->cmd;
1216 	cmd->opc = SPDK_NVME_OPC_COPY;
1217 	cmd->nsid = ns->id;
1218 
1219 	*(uint64_t *)&cmd->cdw10 = dest_lba;
1220 	cmd->cdw12 = num_ranges - 1;
1221 
1222 	return nvme_qpair_submit_request(qpair, req);
1223 }
1224 
1225 int
1226 spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1227 		       spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1228 {
1229 	struct nvme_request	*req;
1230 	struct spdk_nvme_cmd	*cmd;
1231 
1232 	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
1233 	if (req == NULL) {
1234 		return -ENOMEM;
1235 	}
1236 
1237 	cmd = &req->cmd;
1238 	cmd->opc = SPDK_NVME_OPC_FLUSH;
1239 	cmd->nsid = ns->id;
1240 
1241 	return nvme_qpair_submit_request(qpair, req);
1242 }
1243 
1244 int
1245 spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
1246 				      struct spdk_nvme_qpair *qpair,
1247 				      struct spdk_nvme_reservation_register_data *payload,
1248 				      bool ignore_key,
1249 				      enum spdk_nvme_reservation_register_action action,
1250 				      enum spdk_nvme_reservation_register_cptpl cptpl,
1251 				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1252 {
1253 	struct nvme_request	*req;
1254 	struct spdk_nvme_cmd	*cmd;
1255 
1256 	req = nvme_allocate_request_user_copy(qpair,
1257 					      payload, sizeof(struct spdk_nvme_reservation_register_data),
1258 					      cb_fn, cb_arg, true);
1259 	if (req == NULL) {
1260 		return -ENOMEM;
1261 	}
1262 
1263 	cmd = &req->cmd;
1264 	cmd->opc = SPDK_NVME_OPC_RESERVATION_REGISTER;
1265 	cmd->nsid = ns->id;
1266 
1267 	cmd->cdw10_bits.resv_register.rrega = action;
1268 	cmd->cdw10_bits.resv_register.iekey = ignore_key;
1269 	cmd->cdw10_bits.resv_register.cptpl = cptpl;
1270 
1271 	return nvme_qpair_submit_request(qpair, req);
1272 }
1273 
1274 int
1275 spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
1276 				     struct spdk_nvme_qpair *qpair,
1277 				     struct spdk_nvme_reservation_key_data *payload,
1278 				     bool ignore_key,
1279 				     enum spdk_nvme_reservation_release_action action,
1280 				     enum spdk_nvme_reservation_type type,
1281 				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1282 {
1283 	struct nvme_request	*req;
1284 	struct spdk_nvme_cmd	*cmd;
1285 
1286 	req = nvme_allocate_request_user_copy(qpair,
1287 					      payload, sizeof(struct spdk_nvme_reservation_key_data), cb_fn,
1288 					      cb_arg, true);
1289 	if (req == NULL) {
1290 		return -ENOMEM;
1291 	}
1292 
1293 	cmd = &req->cmd;
1294 	cmd->opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
1295 	cmd->nsid = ns->id;
1296 
1297 	cmd->cdw10_bits.resv_release.rrela = action;
1298 	cmd->cdw10_bits.resv_release.iekey = ignore_key;
1299 	cmd->cdw10_bits.resv_release.rtype = type;
1300 
1301 	return nvme_qpair_submit_request(qpair, req);
1302 }
1303 
1304 int
1305 spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
1306 				     struct spdk_nvme_qpair *qpair,
1307 				     struct spdk_nvme_reservation_acquire_data *payload,
1308 				     bool ignore_key,
1309 				     enum spdk_nvme_reservation_acquire_action action,
1310 				     enum spdk_nvme_reservation_type type,
1311 				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1312 {
1313 	struct nvme_request	*req;
1314 	struct spdk_nvme_cmd	*cmd;
1315 
1316 	req = nvme_allocate_request_user_copy(qpair,
1317 					      payload, sizeof(struct spdk_nvme_reservation_acquire_data),
1318 					      cb_fn, cb_arg, true);
1319 	if (req == NULL) {
1320 		return -ENOMEM;
1321 	}
1322 
1323 	cmd = &req->cmd;
1324 	cmd->opc = SPDK_NVME_OPC_RESERVATION_ACQUIRE;
1325 	cmd->nsid = ns->id;
1326 
1327 	cmd->cdw10_bits.resv_acquire.racqa = action;
1328 	cmd->cdw10_bits.resv_acquire.iekey = ignore_key;
1329 	cmd->cdw10_bits.resv_acquire.rtype = type;
1330 
1331 	return nvme_qpair_submit_request(qpair, req);
1332 }
1333 
1334 int
1335 spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns,
1336 				    struct spdk_nvme_qpair *qpair,
1337 				    void *payload, uint32_t len,
1338 				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1339 {
1340 	uint32_t		num_dwords;
1341 	struct nvme_request	*req;
1342 	struct spdk_nvme_cmd	*cmd;
1343 
1344 	if (len & 0x3) {
1345 		return -EINVAL;
1346 	}
1347 
1348 	req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
1349 	if (req == NULL) {
1350 		return -ENOMEM;
1351 	}
1352 
1353 	cmd = &req->cmd;
1354 	cmd->opc = SPDK_NVME_OPC_RESERVATION_REPORT;
1355 	cmd->nsid = ns->id;
1356 
1357 	num_dwords = (len >> 2);
1358 	cmd->cdw10 = num_dwords - 1; /* 0-based */
1359 
1360 	return nvme_qpair_submit_request(qpair, req);
1361 }
1362 
1363 int
1364 spdk_nvme_ns_cmd_io_mgmt_recv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1365 			      void *payload, uint32_t len, uint8_t mo, uint16_t mos,
1366 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1367 {
1368 	uint32_t		num_dwords;
1369 	struct nvme_request	*req;
1370 	struct spdk_nvme_cmd	*cmd;
1371 
1372 	if (len & 0x3) {
1373 		return -EINVAL;
1374 	}
1375 
1376 	req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
1377 	if (req == NULL) {
1378 		return -ENOMEM;
1379 	}
1380 
1381 	cmd = &req->cmd;
1382 	cmd->opc = SPDK_NVME_OPC_IO_MANAGEMENT_RECEIVE;
1383 	cmd->nsid = ns->id;
1384 
1385 	cmd->cdw10_bits.mgmt_send_recv.mo = mo;
1386 	cmd->cdw10_bits.mgmt_send_recv.mos = mos;
1387 
1388 	num_dwords = (len >> 2);
1389 	cmd->cdw11 = num_dwords - 1; /* 0-based */
1390 
1391 	return nvme_qpair_submit_request(qpair, req);
1392 }
1393 
1394 int
1395 spdk_nvme_ns_cmd_io_mgmt_send(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1396 			      void *payload, uint32_t len, uint8_t mo, uint16_t mos,
1397 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
1398 {
1399 	struct nvme_request	*req;
1400 	struct spdk_nvme_cmd	*cmd;
1401 
1402 	req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
1403 	if (req == NULL) {
1404 		return -ENOMEM;
1405 	}
1406 
1407 	cmd = &req->cmd;
1408 	cmd->opc = SPDK_NVME_OPC_IO_MANAGEMENT_SEND;
1409 	cmd->nsid = ns->id;
1410 
1411 	cmd->cdw10_bits.mgmt_send_recv.mo = mo;
1412 	cmd->cdw10_bits.mgmt_send_recv.mos = mos;
1413 
1414 	return nvme_qpair_submit_request(qpair, req);
1415 }
1416