xref: /spdk/lib/nvme/nvme_ns_cmd.c (revision 17cb940f2d62eda264232165648ff1b0afad3dd7)
1488570ebSJim Harris /*   SPDX-License-Identifier: BSD-3-Clause
2a6dbe372Spaul luse  *   Copyright (C) 2015 Intel Corporation.
31010fb3aSDaniel Verkamp  *   All rights reserved.
4c7bb68aaSAlexey Marchuk  *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
5110335f1SAlexey Marchuk  *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
69a1457ffSAnkit Kumar  *   Copyright (c) 2023 Samsung Electronics Co., Ltd. All rights reserved.
71010fb3aSDaniel Verkamp  */
81010fb3aSDaniel Verkamp 
91010fb3aSDaniel Verkamp #include "nvme_internal.h"
101010fb3aSDaniel Verkamp 
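/*
 * All of the public read/write/compare/zone-append entry points in this file funnel into
 * _nvme_ns_cmd_rw(), declared here and defined further below.
 */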
11da366fd0SJim Harris static inline struct nvme_request *_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns,
12da366fd0SJim Harris 		struct spdk_nvme_qpair *qpair,
138a62ba51SPawel Wodkowski 		const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
148a62ba51SPawel Wodkowski 		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
15784182edSChangpeng Liu 		void *cb_arg, uint32_t opc, uint32_t io_flags,
16dc8d4d8dSKonrad Sztyber 		uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl,
17dc8d4d8dSKonrad Sztyber 		void *accel_sequence, int *rc);
18736ec496SDaniel Verkamp 
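/*
 * Return true if the I/O would have to be split into at least as many child requests as the
 * queue has entries, i.e. it could never be submitted even with a completely empty queue.
 */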
19fbf06acaSPiotr Pelplinski static bool
20a3f72b2eSSeth Howell nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_io,
21fbf06acaSPiotr Pelplinski 			     uint32_t sectors_per_stripe, uint32_t qdepth)
22fbf06acaSPiotr Pelplinski {
234582e9fbSChangpeng Liu 	uint32_t child_per_io = UINT32_MAX;
24fbf06acaSPiotr Pelplinski 
254582e9fbSChangpeng Liu 	/* After a namespace is destroyed (e.g. hotplug), all the fields associated with the
264582e9fbSChangpeng Liu 	 * namespace are cleared to zero. In that case this function returns true, and -EINVAL
274582e9fbSChangpeng Liu 	 * is returned to the caller.
284582e9fbSChangpeng Liu 	 */
29fbf06acaSPiotr Pelplinski 	if (sectors_per_stripe > 0) {
30fbf06acaSPiotr Pelplinski 		child_per_io = (lba_count + sectors_per_stripe - 1) / sectors_per_stripe;
314582e9fbSChangpeng Liu 	} else if (sectors_per_max_io > 0) {
32fbf06acaSPiotr Pelplinski 		child_per_io = (lba_count + sectors_per_max_io - 1) / sectors_per_max_io;
33fbf06acaSPiotr Pelplinski 	}
34fbf06acaSPiotr Pelplinski 
352172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvme, "checking maximum i/o length %d\n", child_per_io);
36fbf06acaSPiotr Pelplinski 
37fbf06acaSPiotr Pelplinski 	return child_per_io >= qdepth;
38fbf06acaSPiotr Pelplinski }
39fbf06acaSPiotr Pelplinski 
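/*
 * Map a request allocation failure to a final error code.  -ENOMEM normally means the caller
 * may retry later, but if the I/O is so large that it would need at least as many child
 * requests as the queue has entries, it can never succeed and -EINVAL is returned instead.
 */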
40a1f848b0SAlexey Marchuk static inline int
41a1f848b0SAlexey Marchuk nvme_ns_map_failure_rc(uint32_t lba_count, uint32_t sectors_per_max_io,
42a1f848b0SAlexey Marchuk 		       uint32_t sectors_per_stripe, uint32_t qdepth, int rc)
43a1f848b0SAlexey Marchuk {
44c7bb68aaSAlexey Marchuk 	assert(rc);
45c7bb68aaSAlexey Marchuk 	if (rc == -ENOMEM &&
46c7bb68aaSAlexey Marchuk 	    nvme_ns_check_request_length(lba_count, sectors_per_max_io, sectors_per_stripe, qdepth)) {
47a1f848b0SAlexey Marchuk 		return -EINVAL;
48a1f848b0SAlexey Marchuk 	}
49c7bb68aaSAlexey Marchuk 	return rc;
50a1f848b0SAlexey Marchuk }
51a1f848b0SAlexey Marchuk 
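/*
 * With PRACT set on an extended-LBA namespace formatted with 8-byte protection information,
 * the controller inserts/strips the PI metadata itself, so the metadata is not part of the
 * host buffer transfer.
 */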
524249dc10SNiklas Cassel static inline bool
534249dc10SNiklas Cassel _nvme_md_excluded_from_xfer(struct spdk_nvme_ns *ns, uint32_t io_flags)
544249dc10SNiklas Cassel {
554249dc10SNiklas Cassel 	return (io_flags & SPDK_NVME_IO_FLAGS_PRACT) &&
564249dc10SNiklas Cassel 	       (ns->flags & SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED) &&
574249dc10SNiklas Cassel 	       (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) &&
584249dc10SNiklas Cassel 	       (ns->md_size == 8);
594249dc10SNiklas Cassel }
604249dc10SNiklas Cassel 
61c078941cSNiklas Cassel static inline uint32_t
62c078941cSNiklas Cassel _nvme_get_host_buffer_sector_size(struct spdk_nvme_ns *ns, uint32_t io_flags)
63c078941cSNiklas Cassel {
644249dc10SNiklas Cassel 	return _nvme_md_excluded_from_xfer(ns, io_flags) ?
654249dc10SNiklas Cassel 	       ns->sector_size : ns->extended_lba_size;
66c078941cSNiklas Cassel }
67c078941cSNiklas Cassel 
684249dc10SNiklas Cassel static inline uint32_t
694249dc10SNiklas Cassel _nvme_get_sectors_per_max_io(struct spdk_nvme_ns *ns, uint32_t io_flags)
704249dc10SNiklas Cassel {
714249dc10SNiklas Cassel 	return _nvme_md_excluded_from_xfer(ns, io_flags) ?
724249dc10SNiklas Cassel 	       ns->sectors_per_max_io_no_md : ns->sectors_per_max_io;
73c078941cSNiklas Cassel }
74c078941cSNiklas Cassel 
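/*
 * Allocate one child request covering a slice of the parent I/O and attach it to the parent.
 * If the allocation fails, all previously created children and the parent itself are freed,
 * so the caller only has to propagate the error.
 */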
75736ec496SDaniel Verkamp static struct nvme_request *
76cd13f280SDaniel Verkamp _nvme_add_child_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
77cd13f280SDaniel Verkamp 			const struct nvme_payload *payload,
784cc1cf88SJim Harris 			uint32_t payload_offset, uint32_t md_offset,
794cc1cf88SJim Harris 			uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
80eb37d519SAnkit Kumar 			uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
81c7bb68aaSAlexey Marchuk 			struct nvme_request *parent, bool check_sgl, int *rc)
824cc1cf88SJim Harris {
834cc1cf88SJim Harris 	struct nvme_request	*child;
844cc1cf88SJim Harris 
85cd13f280SDaniel Verkamp 	child = _nvme_ns_cmd_rw(ns, qpair, payload, payload_offset, md_offset, lba, lba_count, cb_fn,
86dc8d4d8dSKonrad Sztyber 				cb_arg, opc, io_flags, apptag_mask, apptag, cdw13, check_sgl, NULL, rc);
874cc1cf88SJim Harris 	if (child == NULL) {
884cc1cf88SJim Harris 		nvme_request_free_children(parent);
894cc1cf88SJim Harris 		nvme_free_request(parent);
904cc1cf88SJim Harris 		return NULL;
914cc1cf88SJim Harris 	}
924cc1cf88SJim Harris 
934cc1cf88SJim Harris 	nvme_request_add_child(parent, child);
944cc1cf88SJim Harris 	return child;
954cc1cf88SJim Harris }
964cc1cf88SJim Harris 
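/*
 * Split a request on stripe or max-transfer boundaries.  sector_mask is sectors_per_stripe - 1
 * for stripe splits and 0 for plain max-transfer splits, so the first child is shortened to end
 * exactly on a boundary and every following child starts aligned.  For example, with
 * sectors_per_max_io = 8 and sector_mask = 7, an I/O at lba 6 for 10 blocks becomes two
 * children: lba 6 for 2 blocks, then lba 8 for 8 blocks.
 */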
974cc1cf88SJim Harris static struct nvme_request *
986ce73aa6SDaniel Verkamp _nvme_ns_cmd_split_request(struct spdk_nvme_ns *ns,
99cd13f280SDaniel Verkamp 			   struct spdk_nvme_qpair *qpair,
100407a5716SDaniel Verkamp 			   const struct nvme_payload *payload,
1018a62ba51SPawel Wodkowski 			   uint32_t payload_offset, uint32_t md_offset,
102736ec496SDaniel Verkamp 			   uint64_t lba, uint32_t lba_count,
1036ce73aa6SDaniel Verkamp 			   spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
10481f40464SBen Walker 			   uint32_t io_flags, struct nvme_request *req,
105784182edSChangpeng Liu 			   uint32_t sectors_per_max_io, uint32_t sector_mask,
106dc8d4d8dSKonrad Sztyber 			   uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
107dc8d4d8dSKonrad Sztyber 			   void *accel_sequence, int *rc)
108736ec496SDaniel Verkamp {
109c078941cSNiklas Cassel 	uint32_t		sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
110736ec496SDaniel Verkamp 	uint32_t		remaining_lba_count = lba_count;
111bcf546bbSPawel Wodkowski 	struct nvme_request	*child;
112736ec496SDaniel Verkamp 
113dc8d4d8dSKonrad Sztyber 	if (spdk_unlikely(accel_sequence != NULL)) {
114dc8d4d8dSKonrad Sztyber 		SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
115dc8d4d8dSKonrad Sztyber 		*rc = -EINVAL;
116dc8d4d8dSKonrad Sztyber 		return NULL;
117dc8d4d8dSKonrad Sztyber 	}
118dc8d4d8dSKonrad Sztyber 
119736ec496SDaniel Verkamp 	while (remaining_lba_count > 0) {
120736ec496SDaniel Verkamp 		lba_count = sectors_per_max_io - (lba & sector_mask);
12184d90484SDaniel Verkamp 		lba_count = spdk_min(remaining_lba_count, lba_count);
122736ec496SDaniel Verkamp 
123cd13f280SDaniel Verkamp 		child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
1244cc1cf88SJim Harris 						lba, lba_count, cb_fn, cb_arg, opc,
125eb37d519SAnkit Kumar 						io_flags, apptag_mask, apptag, cdw13, req, true, rc);
126736ec496SDaniel Verkamp 		if (child == NULL) {
127736ec496SDaniel Verkamp 			return NULL;
128736ec496SDaniel Verkamp 		}
1298a62ba51SPawel Wodkowski 
130736ec496SDaniel Verkamp 		remaining_lba_count -= lba_count;
131736ec496SDaniel Verkamp 		lba += lba_count;
1328a62ba51SPawel Wodkowski 		payload_offset += lba_count * sector_size;
133c078941cSNiklas Cassel 		md_offset += lba_count * ns->md_size;
134736ec496SDaniel Verkamp 	}
135736ec496SDaniel Verkamp 
136736ec496SDaniel Verkamp 	return req;
137736ec496SDaniel Verkamp }
138736ec496SDaniel Verkamp 
139a7b6702dSTomasz Kulasek static inline bool
140a7b6702dSTomasz Kulasek _is_io_flags_valid(uint32_t io_flags)
141a7b6702dSTomasz Kulasek {
1429e60b74bSJacek Kalwas 	if (spdk_unlikely(io_flags & ~SPDK_NVME_IO_FLAGS_VALID_MASK)) {
143a7b6702dSTomasz Kulasek 		/* Invalid io_flags */
144a7b6702dSTomasz Kulasek 		SPDK_ERRLOG("Invalid io_flags 0x%x\n", io_flags);
145a7b6702dSTomasz Kulasek 		return false;
146a7b6702dSTomasz Kulasek 	}
147a7b6702dSTomasz Kulasek 
148a7b6702dSTomasz Kulasek 	return true;
149a7b6702dSTomasz Kulasek }
150a7b6702dSTomasz Kulasek 
151dc8d4d8dSKonrad Sztyber static inline bool
152dc8d4d8dSKonrad Sztyber _is_accel_sequence_valid(struct spdk_nvme_qpair *qpair, void *seq)
153dc8d4d8dSKonrad Sztyber {
154dc8d4d8dSKonrad Sztyber 	/* An accel sequence can only be executed if the controller supports accel and the qpair is
155dc8d4d8dSKonrad Sztyber 	 * part of a poll group. */
1569e60b74bSJacek Kalwas 	if (spdk_likely(seq == NULL || ((qpair->ctrlr->flags & SPDK_NVME_CTRLR_ACCEL_SEQUENCE_SUPPORTED) &&
1579e60b74bSJacek Kalwas 					qpair->poll_group != NULL))) {
1589e60b74bSJacek Kalwas 		return true;
1599e60b74bSJacek Kalwas 	}
1609e60b74bSJacek Kalwas 
1619e60b74bSJacek Kalwas 	return false;
162dc8d4d8dSKonrad Sztyber }
163dc8d4d8dSKonrad Sztyber 
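/*
 * Fill in the NVMe command for a logical-block opcode: CDW10/11 hold the starting LBA,
 * CDW12 holds the 0-based block count in its low 16 bits plus the per-command flags from
 * io_flags, CDW14 carries the expected initial reference tag for PI types 1 and 2, and
 * CDW15 packs the application tag mask (upper 16 bits) and application tag (lower 16 bits).
 */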
164636b078bSPawel Wodkowski static void
165636b078bSPawel Wodkowski _nvme_ns_cmd_setup_request(struct spdk_nvme_ns *ns, struct nvme_request *req,
166ee5b26a2SYongseok Oh 			   uint32_t opc, uint64_t lba, uint32_t lba_count,
167eb37d519SAnkit Kumar 			   uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag,
168eb37d519SAnkit Kumar 			   uint32_t cdw13)
169636b078bSPawel Wodkowski {
170636b078bSPawel Wodkowski 	struct spdk_nvme_cmd	*cmd;
171636b078bSPawel Wodkowski 
172a7b6702dSTomasz Kulasek 	assert(_is_io_flags_valid(io_flags));
173a7b6702dSTomasz Kulasek 
174636b078bSPawel Wodkowski 	cmd = &req->cmd;
175636b078bSPawel Wodkowski 	cmd->opc = opc;
176636b078bSPawel Wodkowski 	cmd->nsid = ns->id;
177636b078bSPawel Wodkowski 
178636b078bSPawel Wodkowski 	*(uint64_t *)&cmd->cdw10 = lba;
179636b078bSPawel Wodkowski 
180636b078bSPawel Wodkowski 	if (ns->flags & SPDK_NVME_NS_DPS_PI_SUPPORTED) {
181636b078bSPawel Wodkowski 		switch (ns->pi_type) {
182636b078bSPawel Wodkowski 		case SPDK_NVME_FMT_NVM_PROTECTION_TYPE1:
183636b078bSPawel Wodkowski 		case SPDK_NVME_FMT_NVM_PROTECTION_TYPE2:
184636b078bSPawel Wodkowski 			cmd->cdw14 = (uint32_t)lba;
185636b078bSPawel Wodkowski 			break;
186636b078bSPawel Wodkowski 		}
187636b078bSPawel Wodkowski 	}
188636b078bSPawel Wodkowski 
1891c5d9803STomasz Kulasek 	cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
1901c5d9803STomasz Kulasek 
191636b078bSPawel Wodkowski 	cmd->cdw12 = lba_count - 1;
1921c5d9803STomasz Kulasek 	cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);
193636b078bSPawel Wodkowski 
194eb37d519SAnkit Kumar 	cmd->cdw13 = cdw13;
195eb37d519SAnkit Kumar 
196636b078bSPawel Wodkowski 	cmd->cdw15 = apptag_mask;
197636b078bSPawel Wodkowski 	cmd->cdw15 = (cmd->cdw15 << 16 | apptag);
198636b078bSPawel Wodkowski }
199636b078bSPawel Wodkowski 
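/*
 * Split an SGL payload so that each child request satisfies the PRP rules: every element
 * except the first must start on a page boundary, and every element except the last must end
 * on one.  Whenever an element breaks those rules, everything accumulated so far is sent as a
 * child request and a new child is started.
 */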
200736ec496SDaniel Verkamp static struct nvme_request *
201c27e4a18SJim Harris _nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
202cd13f280SDaniel Verkamp 			       struct spdk_nvme_qpair *qpair,
20386e8a920SJim Harris 			       const struct nvme_payload *payload,
20486e8a920SJim Harris 			       uint32_t payload_offset, uint32_t md_offset,
20586e8a920SJim Harris 			       uint64_t lba, uint32_t lba_count,
20686e8a920SJim Harris 			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
20786e8a920SJim Harris 			       uint32_t io_flags, struct nvme_request *req,
208dc8d4d8dSKonrad Sztyber 			       uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
209dc8d4d8dSKonrad Sztyber 			       void *accel_sequence, int *rc)
21086e8a920SJim Harris {
2117dff719fSDaniel Verkamp 	spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
2127dff719fSDaniel Verkamp 	spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
2137dff719fSDaniel Verkamp 	void *sgl_cb_arg = req->payload.contig_or_cb_arg;
21486e8a920SJim Harris 	bool start_valid, end_valid, last_sge, child_equals_parent;
21586e8a920SJim Harris 	uint64_t child_lba = lba;
21686e8a920SJim Harris 	uint32_t req_current_length = 0;
21786e8a920SJim Harris 	uint32_t child_length = 0;
21886e8a920SJim Harris 	uint32_t sge_length;
2192eec131eSDaniel Verkamp 	uint32_t page_size = qpair->ctrlr->page_size;
22086e8a920SJim Harris 	uintptr_t address;
22186e8a920SJim Harris 
2227dff719fSDaniel Verkamp 	reset_sgl_fn(sgl_cb_arg, payload_offset);
2237dff719fSDaniel Verkamp 	next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
22486e8a920SJim Harris 	while (req_current_length < req->payload_size) {
22586e8a920SJim Harris 
22686e8a920SJim Harris 		if (sge_length == 0) {
22786e8a920SJim Harris 			continue;
22886e8a920SJim Harris 		} else if (req_current_length + sge_length > req->payload_size) {
22986e8a920SJim Harris 			sge_length = req->payload_size - req_current_length;
23086e8a920SJim Harris 		}
23186e8a920SJim Harris 
23286e8a920SJim Harris 		/*
23386e8a920SJim Harris 		 * The start of the SGE is invalid if the start address is not page aligned,
23486e8a920SJim Harris 		 *  unless it is the first SGE in the child request.
23586e8a920SJim Harris 		 */
2362eec131eSDaniel Verkamp 		start_valid = child_length == 0 || _is_page_aligned(address, page_size);
23786e8a920SJim Harris 
23886e8a920SJim Harris 		/* Boolean for whether this is the last SGE in the parent request. */
23986e8a920SJim Harris 		last_sge = (req_current_length + sge_length == req->payload_size);
24086e8a920SJim Harris 
24186e8a920SJim Harris 		/*
24286e8a920SJim Harris 		 * The end of the SGE is invalid if the end address is not page aligned,
24386e8a920SJim Harris 		 *  unless it is the last SGE in the parent request.
24486e8a920SJim Harris 		 */
2452eec131eSDaniel Verkamp 		end_valid = last_sge || _is_page_aligned(address + sge_length, page_size);
24686e8a920SJim Harris 
24786e8a920SJim Harris 		/*
24886e8a920SJim Harris 		 * This child request equals the parent request, meaning that no splitting
24986e8a920SJim Harris 		 *  was required for the parent request (the one passed into this function).
25086e8a920SJim Harris 		 *  In this case, we do not create a child request at all - we just send
25186e8a920SJim Harris 		 *  the original request as a single request at the end of this function.
25286e8a920SJim Harris 		 */
25386e8a920SJim Harris 		child_equals_parent = (child_length + sge_length == req->payload_size);
25486e8a920SJim Harris 
25586e8a920SJim Harris 		if (start_valid) {
25686e8a920SJim Harris 			/*
25786e8a920SJim Harris 			 * The start of the SGE is valid, so advance the length parameters,
25886e8a920SJim Harris 			 *  to include this SGE with previous SGEs for this child request
25986e8a920SJim Harris 			 *  (if any).  If it is not valid, we do not advance the length
26086e8a920SJim Harris 			 *  parameters nor get the next SGE, because we must send what has
26186e8a920SJim Harris 			 *  been collected before this SGE as a child request.
26286e8a920SJim Harris 			 */
26386e8a920SJim Harris 			child_length += sge_length;
26486e8a920SJim Harris 			req_current_length += sge_length;
26586e8a920SJim Harris 			if (req_current_length < req->payload_size) {
2667dff719fSDaniel Verkamp 				next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
2670b33e77eSJim Harris 				/*
2681f49ee23SNiklas Cassel 				 * If the next SGE is not page aligned, we will need to create a
2691f49ee23SNiklas Cassel 				 *  child request for what we have so far, and then start a new
2701f49ee23SNiklas Cassel 				 *  child request for the next SGE.
2710b33e77eSJim Harris 				 */
2722eec131eSDaniel Verkamp 				start_valid = _is_page_aligned(address, page_size);
27386e8a920SJim Harris 			}
2741f49ee23SNiklas Cassel 		}
27586e8a920SJim Harris 
276e2b330e9SJim Harris 		if (start_valid && end_valid && !last_sge) {
277e2b330e9SJim Harris 			continue;
278e2b330e9SJim Harris 		}
279e2b330e9SJim Harris 
28086e8a920SJim Harris 		/*
281e2b330e9SJim Harris 		 * We need to create a split here.  Send what we have accumulated so far as a child
282e2b330e9SJim Harris 		 *  request.  Checking if child_equals_parent allows us to *not* create a child request
283e2b330e9SJim Harris 		 *  when no splitting is required - in that case we will fall-through and just create
284e2b330e9SJim Harris 		 *  a single request with no children for the entire I/O.
28586e8a920SJim Harris 		 */
286e2b330e9SJim Harris 		if (!child_equals_parent) {
28786e8a920SJim Harris 			struct nvme_request *child;
28886e8a920SJim Harris 			uint32_t child_lba_count;
28986e8a920SJim Harris 
29059fc5ba6SDaniel Verkamp 			if ((child_length % ns->extended_lba_size) != 0) {
291c27e4a18SJim Harris 				SPDK_ERRLOG("child_length %u not even multiple of lba_size %u\n",
292c27e4a18SJim Harris 					    child_length, ns->extended_lba_size);
293c7bb68aaSAlexey Marchuk 				*rc = -EINVAL;
29486e8a920SJim Harris 				return NULL;
29586e8a920SJim Harris 			}
296dc8d4d8dSKonrad Sztyber 			if (spdk_unlikely(accel_sequence != NULL)) {
297dc8d4d8dSKonrad Sztyber 				SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
298dc8d4d8dSKonrad Sztyber 				*rc = -EINVAL;
299dc8d4d8dSKonrad Sztyber 				return NULL;
300dc8d4d8dSKonrad Sztyber 			}
301dc8d4d8dSKonrad Sztyber 
30259fc5ba6SDaniel Verkamp 			child_lba_count = child_length / ns->extended_lba_size;
30386e8a920SJim Harris 			/*
30486e8a920SJim Harris 			 * Note the check_sgl parameter is set to "false" - this tells the recursive
30586e8a920SJim Harris 			 *  call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
30686e8a920SJim Harris 			 *  since we have already verified it here.
30786e8a920SJim Harris 			 */
308cd13f280SDaniel Verkamp 			child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
30986e8a920SJim Harris 							child_lba, child_lba_count,
31086e8a920SJim Harris 							cb_fn, cb_arg, opc, io_flags,
311eb37d519SAnkit Kumar 							apptag_mask, apptag, cdw13, req, false, rc);
31286e8a920SJim Harris 			if (child == NULL) {
31386e8a920SJim Harris 				return NULL;
31486e8a920SJim Harris 			}
31586e8a920SJim Harris 			payload_offset += child_length;
31686e8a920SJim Harris 			md_offset += child_lba_count * ns->md_size;
31786e8a920SJim Harris 			child_lba += child_lba_count;
31886e8a920SJim Harris 			child_length = 0;
31986e8a920SJim Harris 		}
32086e8a920SJim Harris 	}
32186e8a920SJim Harris 
32286e8a920SJim Harris 	if (child_length == req->payload_size) {
32386e8a920SJim Harris 		/* No splitting was required, so set up the whole payload as one request. */
324eb37d519SAnkit Kumar 		_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
32586e8a920SJim Harris 	}
32686e8a920SJim Harris 
32786e8a920SJim Harris 	return req;
32886e8a920SJim Harris }
32986e8a920SJim Harris 
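/*
 * Split an SGL payload for controllers with native SGL support: elements are accumulated until
 * the controller's max_sges limit is reached and then sent as a child request.  If the last
 * element of a child does not end on a block boundary, the child is trimmed back to the
 * previous block boundary and the remainder is carried over into the next child.
 */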
33086e8a920SJim Harris static struct nvme_request *
331c27e4a18SJim Harris _nvme_ns_cmd_split_request_sgl(struct spdk_nvme_ns *ns,
332c27e4a18SJim Harris 			       struct spdk_nvme_qpair *qpair,
333c27e4a18SJim Harris 			       const struct nvme_payload *payload,
334c27e4a18SJim Harris 			       uint32_t payload_offset, uint32_t md_offset,
335c27e4a18SJim Harris 			       uint64_t lba, uint32_t lba_count,
336c27e4a18SJim Harris 			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
337c27e4a18SJim Harris 			       uint32_t io_flags, struct nvme_request *req,
338dc8d4d8dSKonrad Sztyber 			       uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
339dc8d4d8dSKonrad Sztyber 			       void *accel_sequence, int *rc)
340c27e4a18SJim Harris {
3417dff719fSDaniel Verkamp 	spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
3427dff719fSDaniel Verkamp 	spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
3437dff719fSDaniel Verkamp 	void *sgl_cb_arg = req->payload.contig_or_cb_arg;
344c27e4a18SJim Harris 	uint64_t child_lba = lba;
345c27e4a18SJim Harris 	uint32_t req_current_length = 0;
346*17cb940fSBen Walker 	uint32_t accumulated_length = 0;
347c27e4a18SJim Harris 	uint32_t sge_length;
348c27e4a18SJim Harris 	uint16_t max_sges, num_sges;
349c27e4a18SJim Harris 	uintptr_t address;
350c27e4a18SJim Harris 
351c27e4a18SJim Harris 	max_sges = ns->ctrlr->max_sges;
352c27e4a18SJim Harris 
3537dff719fSDaniel Verkamp 	reset_sgl_fn(sgl_cb_arg, payload_offset);
354c27e4a18SJim Harris 	num_sges = 0;
355c27e4a18SJim Harris 
356c27e4a18SJim Harris 	while (req_current_length < req->payload_size) {
3577dff719fSDaniel Verkamp 		next_sge_fn(sgl_cb_arg, (void **)&address, &sge_length);
358c27e4a18SJim Harris 
359c27e4a18SJim Harris 		if (req_current_length + sge_length > req->payload_size) {
360c27e4a18SJim Harris 			sge_length = req->payload_size - req_current_length;
361c27e4a18SJim Harris 		}
362c27e4a18SJim Harris 
363*17cb940fSBen Walker 		accumulated_length += sge_length;
364c27e4a18SJim Harris 		req_current_length += sge_length;
365c27e4a18SJim Harris 		num_sges++;
366c27e4a18SJim Harris 
367b2e778b2SSeth Howell 		if (num_sges < max_sges && req_current_length < req->payload_size) {
368c27e4a18SJim Harris 			continue;
369c27e4a18SJim Harris 		}
370c27e4a18SJim Harris 
371c27e4a18SJim Harris 		/*
372c27e4a18SJim Harris 		 * We need to create a split here.  Send what we have accumulated so far as a child
373c27e4a18SJim Harris 		 *  request.  Checking if the child equals the full payload allows us to *not*
374c27e4a18SJim Harris 		 *  create a child request when no splitting is required - in that case we will
375c27e4a18SJim Harris 		 *  fall-through and just create a single request with no children for the entire I/O.
376c27e4a18SJim Harris 		 */
377*17cb940fSBen Walker 		if (accumulated_length != req->payload_size) {
378c27e4a18SJim Harris 			struct nvme_request *child;
379c27e4a18SJim Harris 			uint32_t child_lba_count;
380*17cb940fSBen Walker 			uint32_t child_length;
381*17cb940fSBen Walker 			uint32_t extra_length;
382c27e4a18SJim Harris 
383*17cb940fSBen Walker 			child_length = accumulated_length;
384*17cb940fSBen Walker 			/* Child length may not be a multiple of the block size! */
385*17cb940fSBen Walker 			child_lba_count = child_length / ns->extended_lba_size;
386*17cb940fSBen Walker 			extra_length = child_length - (child_lba_count * ns->extended_lba_size);
387*17cb940fSBen Walker 			if (extra_length != 0) {
388*17cb940fSBen Walker 				/* The last SGE does not end on a block boundary. We need to cut it off. */
389*17cb940fSBen Walker 				if (extra_length >= child_length) {
390*17cb940fSBen Walker 					SPDK_ERRLOG("Unable to send I/O. Would require more than the supported number of "
391*17cb940fSBen Walker 						    "SGL Elements.\n");
392c7bb68aaSAlexey Marchuk 					*rc = -EINVAL;
393c27e4a18SJim Harris 					return NULL;
394c27e4a18SJim Harris 				}
395*17cb940fSBen Walker 				child_length -= extra_length;
396*17cb940fSBen Walker 			}
397*17cb940fSBen Walker 
398dc8d4d8dSKonrad Sztyber 			if (spdk_unlikely(accel_sequence != NULL)) {
399dc8d4d8dSKonrad Sztyber 				SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
400dc8d4d8dSKonrad Sztyber 				*rc = -EINVAL;
401dc8d4d8dSKonrad Sztyber 				return NULL;
402dc8d4d8dSKonrad Sztyber 			}
403dc8d4d8dSKonrad Sztyber 
404c27e4a18SJim Harris 			/*
405c27e4a18SJim Harris 			 * Note the check_sgl parameter is set to "false" - this tells the recursive
406c27e4a18SJim Harris 			 *  call to _nvme_ns_cmd_rw() to not bother with checking for SGL splitting
407c27e4a18SJim Harris 			 *  since we have already verified it here.
408c27e4a18SJim Harris 			 */
409c27e4a18SJim Harris 			child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
410c27e4a18SJim Harris 							child_lba, child_lba_count,
411c27e4a18SJim Harris 							cb_fn, cb_arg, opc, io_flags,
412eb37d519SAnkit Kumar 							apptag_mask, apptag, cdw13, req, false, rc);
413c27e4a18SJim Harris 			if (child == NULL) {
414c27e4a18SJim Harris 				return NULL;
415c27e4a18SJim Harris 			}
416c27e4a18SJim Harris 			payload_offset += child_length;
417c27e4a18SJim Harris 			md_offset += child_lba_count * ns->md_size;
418c27e4a18SJim Harris 			child_lba += child_lba_count;
419*17cb940fSBen Walker 			accumulated_length -= child_length;
420*17cb940fSBen Walker 			num_sges = accumulated_length > 0;
421c27e4a18SJim Harris 		}
422c27e4a18SJim Harris 	}
423c27e4a18SJim Harris 
424*17cb940fSBen Walker 	if (accumulated_length == req->payload_size) {
425c27e4a18SJim Harris 		/* No splitting was required, so set up the whole payload as one request. */
426eb37d519SAnkit Kumar 		_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
427c27e4a18SJim Harris 	}
428c27e4a18SJim Harris 
429c27e4a18SJim Harris 	return req;
430c27e4a18SJim Harris }
431c27e4a18SJim Harris 
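/*
 * Build (and, if necessary, split) a request for a logical-block I/O command.  Splitting is
 * attempted in this order: on stripe boundaries (for controllers that advertise them), on the
 * max transfer size, and finally on PRP/SGL constraints for SGL payloads when check_sgl is set.
 * If no split is needed, the single request is set up and returned directly.
 */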
432da366fd0SJim Harris static inline struct nvme_request *
433cd13f280SDaniel Verkamp _nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
434cd13f280SDaniel Verkamp 		const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
4356ce73aa6SDaniel Verkamp 		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
436dc8d4d8dSKonrad Sztyber 		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl,
437dc8d4d8dSKonrad Sztyber 		void *accel_sequence, int *rc)
4381010fb3aSDaniel Verkamp {
4391010fb3aSDaniel Verkamp 	struct nvme_request	*req;
440c078941cSNiklas Cassel 	uint32_t		sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
4414249dc10SNiklas Cassel 	uint32_t		sectors_per_max_io = _nvme_get_sectors_per_max_io(ns, io_flags);
442c078941cSNiklas Cassel 	uint32_t		sectors_per_stripe = ns->sectors_per_stripe;
443784182edSChangpeng Liu 
444c7bb68aaSAlexey Marchuk 	assert(rc != NULL);
445c7bb68aaSAlexey Marchuk 	assert(*rc == 0);
446c7bb68aaSAlexey Marchuk 
44768d3bb2dSChangpeng Liu 	req = nvme_allocate_request(qpair, payload, lba_count * sector_size, lba_count * ns->md_size,
44868d3bb2dSChangpeng Liu 				    cb_fn, cb_arg);
4491010fb3aSDaniel Verkamp 	if (req == NULL) {
450c7bb68aaSAlexey Marchuk 		*rc = -ENOMEM;
4511010fb3aSDaniel Verkamp 		return NULL;
4521010fb3aSDaniel Verkamp 	}
4531010fb3aSDaniel Verkamp 
4548a62ba51SPawel Wodkowski 	req->payload_offset = payload_offset;
4558a62ba51SPawel Wodkowski 	req->md_offset = md_offset;
456dc8d4d8dSKonrad Sztyber 	req->accel_sequence = accel_sequence;
4578a62ba51SPawel Wodkowski 
458aa6767fbSNiklas Cassel 	/* Zone append commands cannot be split. */
459aa6767fbSNiklas Cassel 	if (opc == SPDK_NVME_OPC_ZONE_APPEND) {
460aa6767fbSNiklas Cassel 		assert(ns->csi == SPDK_NVME_CSI_ZNS);
461014baeb8SNiklas Cassel 		/*
462014baeb8SNiklas Cassel 		 * As long as driver-assisted striping is disabled for zone append commands,
463014baeb8SNiklas Cassel 		 * _nvme_ns_cmd_rw() should never split a proper request.
464014baeb8SNiklas Cassel 		 * If a request does get split after all, error handling is done in the caller functions.
465014baeb8SNiklas Cassel 		 */
466014baeb8SNiklas Cassel 		sectors_per_stripe = 0;
467aa6767fbSNiklas Cassel 	}
468aa6767fbSNiklas Cassel 
4691010fb3aSDaniel Verkamp 	/*
4701010fb3aSDaniel Verkamp 	 * Intel DC P3*00 NVMe controllers benefit from driver-assisted striping.
4711010fb3aSDaniel Verkamp 	 * If this controller defines a stripe boundary and this I/O spans a stripe
4721010fb3aSDaniel Verkamp 	 *  boundary, split the request into multiple requests and submit each
4731010fb3aSDaniel Verkamp 	 *  separately to hardware.
4741010fb3aSDaniel Verkamp 	 */
4751010fb3aSDaniel Verkamp 	if (sectors_per_stripe > 0 &&
4761010fb3aSDaniel Verkamp 	    (((lba & (sectors_per_stripe - 1)) + lba_count) > sectors_per_stripe)) {
477cd13f280SDaniel Verkamp 		return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
478cd13f280SDaniel Verkamp 						  cb_fn,
4798a62ba51SPawel Wodkowski 						  cb_arg, opc,
480dc8d4d8dSKonrad Sztyber 						  io_flags, req, sectors_per_stripe, sectors_per_stripe - 1,
481dc8d4d8dSKonrad Sztyber 						  apptag_mask, apptag, cdw13,  accel_sequence, rc);
4821010fb3aSDaniel Verkamp 	} else if (lba_count > sectors_per_max_io) {
483cd13f280SDaniel Verkamp 		return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
484cd13f280SDaniel Verkamp 						  cb_fn,
4858a62ba51SPawel Wodkowski 						  cb_arg, opc,
486dc8d4d8dSKonrad Sztyber 						  io_flags, req, sectors_per_max_io, 0, apptag_mask,
487dc8d4d8dSKonrad Sztyber 						  apptag, cdw13, accel_sequence, rc);
4885c2ccd06SDaniel Verkamp 	} else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL && check_sgl) {
489c27e4a18SJim Harris 		if (ns->ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
490c27e4a18SJim Harris 			return _nvme_ns_cmd_split_request_sgl(ns, qpair, payload, payload_offset, md_offset,
491c27e4a18SJim Harris 							      lba, lba_count, cb_fn, cb_arg, opc, io_flags,
492dc8d4d8dSKonrad Sztyber 							      req, apptag_mask, apptag, cdw13,
493dc8d4d8dSKonrad Sztyber 							      accel_sequence, rc);
494c27e4a18SJim Harris 		} else {
495c27e4a18SJim Harris 			return _nvme_ns_cmd_split_request_prp(ns, qpair, payload, payload_offset, md_offset,
496c27e4a18SJim Harris 							      lba, lba_count, cb_fn, cb_arg, opc, io_flags,
497dc8d4d8dSKonrad Sztyber 							      req, apptag_mask, apptag, cdw13,
498dc8d4d8dSKonrad Sztyber 							      accel_sequence, rc);
499c27e4a18SJim Harris 		}
500784182edSChangpeng Liu 	}
501784182edSChangpeng Liu 
502eb37d519SAnkit Kumar 	_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
5031010fb3aSDaniel Verkamp 	return req;
5041010fb3aSDaniel Verkamp }
5051010fb3aSDaniel Verkamp 
5061010fb3aSDaniel Verkamp int
50790790c94SGangCao spdk_nvme_ns_cmd_compare(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
50890790c94SGangCao 			 uint64_t lba,
50990790c94SGangCao 			 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
51090790c94SGangCao 			 uint32_t io_flags)
51190790c94SGangCao {
51290790c94SGangCao 	struct nvme_request *req;
51390790c94SGangCao 	struct nvme_payload payload;
514a1f848b0SAlexey Marchuk 	int rc = 0;
51590790c94SGangCao 
516a7b6702dSTomasz Kulasek 	if (!_is_io_flags_valid(io_flags)) {
517a7b6702dSTomasz Kulasek 		return -EINVAL;
518a7b6702dSTomasz Kulasek 	}
519a7b6702dSTomasz Kulasek 
520caf85d8fSDaniel Verkamp 	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
52190790c94SGangCao 
52290790c94SGangCao 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
52390790c94SGangCao 			      SPDK_NVME_OPC_COMPARE,
52490790c94SGangCao 			      io_flags, 0,
525dc8d4d8dSKonrad Sztyber 			      0, 0, false, NULL, &rc);
52690790c94SGangCao 	if (req != NULL) {
52790790c94SGangCao 		return nvme_qpair_submit_request(qpair, req);
528a1f848b0SAlexey Marchuk 	} else {
529a1f848b0SAlexey Marchuk 		return nvme_ns_map_failure_rc(lba_count,
530fbf06acaSPiotr Pelplinski 					      ns->sectors_per_max_io,
531fbf06acaSPiotr Pelplinski 					      ns->sectors_per_stripe,
532a1f848b0SAlexey Marchuk 					      qpair->ctrlr->opts.io_queue_requests,
533a1f848b0SAlexey Marchuk 					      rc);
53490790c94SGangCao 	}
53590790c94SGangCao }
53690790c94SGangCao 
53790790c94SGangCao int
53890790c94SGangCao spdk_nvme_ns_cmd_compare_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
53990790c94SGangCao 				 void *buffer,
54090790c94SGangCao 				 void *metadata,
54190790c94SGangCao 				 uint64_t lba,
54290790c94SGangCao 				 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
54390790c94SGangCao 				 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
54490790c94SGangCao {
54590790c94SGangCao 	struct nvme_request *req;
54690790c94SGangCao 	struct nvme_payload payload;
547a1f848b0SAlexey Marchuk 	int rc = 0;
54890790c94SGangCao 
549a7b6702dSTomasz Kulasek 	if (!_is_io_flags_valid(io_flags)) {
550a7b6702dSTomasz Kulasek 		return -EINVAL;
551a7b6702dSTomasz Kulasek 	}
552a7b6702dSTomasz Kulasek 
553caf85d8fSDaniel Verkamp 	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
55490790c94SGangCao 
55590790c94SGangCao 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
55690790c94SGangCao 			      SPDK_NVME_OPC_COMPARE,
55790790c94SGangCao 			      io_flags,
558dc8d4d8dSKonrad Sztyber 			      apptag_mask, apptag, 0, false, NULL, &rc);
55990790c94SGangCao 	if (req != NULL) {
56090790c94SGangCao 		return nvme_qpair_submit_request(qpair, req);
561a1f848b0SAlexey Marchuk 	} else {
562a1f848b0SAlexey Marchuk 		return nvme_ns_map_failure_rc(lba_count,
563fbf06acaSPiotr Pelplinski 					      ns->sectors_per_max_io,
564fbf06acaSPiotr Pelplinski 					      ns->sectors_per_stripe,
565a1f848b0SAlexey Marchuk 					      qpair->ctrlr->opts.io_queue_requests,
566a1f848b0SAlexey Marchuk 					      rc);
56790790c94SGangCao 	}
56890790c94SGangCao }
56990790c94SGangCao 
57090790c94SGangCao int
57190790c94SGangCao spdk_nvme_ns_cmd_comparev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
57290790c94SGangCao 			  uint64_t lba, uint32_t lba_count,
57390790c94SGangCao 			  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
57490790c94SGangCao 			  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
57590790c94SGangCao 			  spdk_nvme_req_next_sge_cb next_sge_fn)
57690790c94SGangCao {
57790790c94SGangCao 	struct nvme_request *req;
57890790c94SGangCao 	struct nvme_payload payload;
579a1f848b0SAlexey Marchuk 	int rc = 0;
58090790c94SGangCao 
581a7b6702dSTomasz Kulasek 	if (!_is_io_flags_valid(io_flags)) {
582a7b6702dSTomasz Kulasek 		return -EINVAL;
583a7b6702dSTomasz Kulasek 	}
584a7b6702dSTomasz Kulasek 
58559970a89SDaniel Verkamp 	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
58690790c94SGangCao 		return -EINVAL;
58759970a89SDaniel Verkamp 	}
58890790c94SGangCao 
589caf85d8fSDaniel Verkamp 	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
59090790c94SGangCao 
59190790c94SGangCao 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
59290790c94SGangCao 			      SPDK_NVME_OPC_COMPARE,
593dc8d4d8dSKonrad Sztyber 			      io_flags, 0, 0, 0, true, NULL, &rc);
59490790c94SGangCao 	if (req != NULL) {
59590790c94SGangCao 		return nvme_qpair_submit_request(qpair, req);
596a1f848b0SAlexey Marchuk 	} else {
597a1f848b0SAlexey Marchuk 		return nvme_ns_map_failure_rc(lba_count,
598fbf06acaSPiotr Pelplinski 					      ns->sectors_per_max_io,
599fbf06acaSPiotr Pelplinski 					      ns->sectors_per_stripe,
600a1f848b0SAlexey Marchuk 					      qpair->ctrlr->opts.io_queue_requests,
601a1f848b0SAlexey Marchuk 					      rc);
60290790c94SGangCao 	}
60390790c94SGangCao }
60490790c94SGangCao 
60590790c94SGangCao int
606c7092e2bSMaciej Szwed spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
607c7092e2bSMaciej Szwed 				  uint64_t lba, uint32_t lba_count,
608c7092e2bSMaciej Szwed 				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
609c7092e2bSMaciej Szwed 				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
610c7092e2bSMaciej Szwed 				  spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
611c7092e2bSMaciej Szwed 				  uint16_t apptag_mask, uint16_t apptag)
612c7092e2bSMaciej Szwed {
613c7092e2bSMaciej Szwed 	struct nvme_request *req;
614c7092e2bSMaciej Szwed 	struct nvme_payload payload;
615a1f848b0SAlexey Marchuk 	int rc = 0;
616c7092e2bSMaciej Szwed 
617c7092e2bSMaciej Szwed 	if (!_is_io_flags_valid(io_flags)) {
618c7092e2bSMaciej Szwed 		return -EINVAL;
619c7092e2bSMaciej Szwed 	}
620c7092e2bSMaciej Szwed 
621c7092e2bSMaciej Szwed 	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
622c7092e2bSMaciej Szwed 		return -EINVAL;
623c7092e2bSMaciej Szwed 	}
624c7092e2bSMaciej Szwed 
625c7092e2bSMaciej Szwed 	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
626c7092e2bSMaciej Szwed 
627c7092e2bSMaciej Szwed 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
628dc8d4d8dSKonrad Sztyber 			      SPDK_NVME_OPC_COMPARE, io_flags, apptag_mask, apptag, 0, true,
629dc8d4d8dSKonrad Sztyber 			      NULL, &rc);
630c7092e2bSMaciej Szwed 	if (req != NULL) {
631c7092e2bSMaciej Szwed 		return nvme_qpair_submit_request(qpair, req);
632a1f848b0SAlexey Marchuk 	} else {
633a1f848b0SAlexey Marchuk 		return nvme_ns_map_failure_rc(lba_count,
634c7092e2bSMaciej Szwed 					      ns->sectors_per_max_io,
635c7092e2bSMaciej Szwed 					      ns->sectors_per_stripe,
636a1f848b0SAlexey Marchuk 					      qpair->ctrlr->opts.io_queue_requests,
637a1f848b0SAlexey Marchuk 					      rc);
638c7092e2bSMaciej Szwed 	}
639c7092e2bSMaciej Szwed }
640c7092e2bSMaciej Szwed 
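/*
 * Rough usage sketch for the simple contiguous-buffer read below (not part of the driver;
 * "example_read_done", "done" and the 4 KiB sizes are hypothetical, and "ns"/"qpair" are
 * assumed to have been set up already):
 *
 *	static void
 *	example_read_done(void *cb_arg, const struct spdk_nvme_cpl *cpl)
 *	{
 *		bool *done = cb_arg;
 *
 *		if (spdk_nvme_cpl_is_error(cpl)) {
 *			SPDK_ERRLOG("read failed\n");
 *		}
 *		*done = true;
 *	}
 *
 *	bool done = false;
 *	void *buf = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
 *	int rc = spdk_nvme_ns_cmd_read(ns, qpair, buf, 0, 1, example_read_done, &done, 0);
 *
 *	while (rc == 0 && !done) {
 *		spdk_nvme_qpair_process_completions(qpair, 0);
 *	}
 */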
641c7092e2bSMaciej Szwed int
6423272320cSDaniel Verkamp spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
6433272320cSDaniel Verkamp 		      uint64_t lba,
6446ce73aa6SDaniel Verkamp 		      uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
64581f40464SBen Walker 		      uint32_t io_flags)
6461010fb3aSDaniel Verkamp {
6471010fb3aSDaniel Verkamp 	struct nvme_request *req;
648407a5716SDaniel Verkamp 	struct nvme_payload payload;
649a1f848b0SAlexey Marchuk 	int rc = 0;
6501010fb3aSDaniel Verkamp 
651a7b6702dSTomasz Kulasek 	if (!_is_io_flags_valid(io_flags)) {
652a7b6702dSTomasz Kulasek 		return -EINVAL;
653a7b6702dSTomasz Kulasek 	}
654a7b6702dSTomasz Kulasek 
655caf85d8fSDaniel Verkamp 	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
656407a5716SDaniel Verkamp 
657cd13f280SDaniel Verkamp 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
6588a62ba51SPawel Wodkowski 			      io_flags, 0,
659dc8d4d8dSKonrad Sztyber 			      0, 0, false, NULL, &rc);
660784182edSChangpeng Liu 	if (req != NULL) {
661784182edSChangpeng Liu 		return nvme_qpair_submit_request(qpair, req);
662a1f848b0SAlexey Marchuk 	} else {
663a1f848b0SAlexey Marchuk 		return nvme_ns_map_failure_rc(lba_count,
664fbf06acaSPiotr Pelplinski 					      ns->sectors_per_max_io,
665fbf06acaSPiotr Pelplinski 					      ns->sectors_per_stripe,
666a1f848b0SAlexey Marchuk 					      qpair->ctrlr->opts.io_queue_requests,
667a1f848b0SAlexey Marchuk 					      rc);
668784182edSChangpeng Liu 	}
669784182edSChangpeng Liu }
670784182edSChangpeng Liu 
671784182edSChangpeng Liu int
672784182edSChangpeng Liu spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
673784182edSChangpeng Liu 			      void *metadata,
674784182edSChangpeng Liu 			      uint64_t lba,
675784182edSChangpeng Liu 			      uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
676784182edSChangpeng Liu 			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
677784182edSChangpeng Liu {
678784182edSChangpeng Liu 	struct nvme_request *req;
679784182edSChangpeng Liu 	struct nvme_payload payload;
680a1f848b0SAlexey Marchuk 	int rc = 0;
681784182edSChangpeng Liu 
682a7b6702dSTomasz Kulasek 	if (!_is_io_flags_valid(io_flags)) {
683a7b6702dSTomasz Kulasek 		return -EINVAL;
684a7b6702dSTomasz Kulasek 	}
685a7b6702dSTomasz Kulasek 
686caf85d8fSDaniel Verkamp 	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
687784182edSChangpeng Liu 
688cd13f280SDaniel Verkamp 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
6898a62ba51SPawel Wodkowski 			      io_flags,
690dc8d4d8dSKonrad Sztyber 			      apptag_mask, apptag, 0, false, NULL, &rc);
691d3d6d19bSChangpeng Liu 	if (req != NULL) {
692eb555b13SDaniel Verkamp 		return nvme_qpair_submit_request(qpair, req);
693a1f848b0SAlexey Marchuk 	} else {
694a1f848b0SAlexey Marchuk 		return nvme_ns_map_failure_rc(lba_count,
695fbf06acaSPiotr Pelplinski 					      ns->sectors_per_max_io,
696fbf06acaSPiotr Pelplinski 					      ns->sectors_per_stripe,
697a1f848b0SAlexey Marchuk 					      qpair->ctrlr->opts.io_queue_requests,
698a1f848b0SAlexey Marchuk 					      rc);
699d3d6d19bSChangpeng Liu 	}
700d3d6d19bSChangpeng Liu }
701d3d6d19bSChangpeng Liu 
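/*
 * Common implementation for the *_ext read/write variants: the I/O flags, metadata pointer,
 * application tags and (optionally) an accel sequence are all taken from the caller-provided
 * spdk_nvme_ns_cmd_ext_io_opts structure instead of being passed as separate arguments.
 */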
7024f1a0b26SJacek Kalwas static int
7034f1a0b26SJacek Kalwas nvme_ns_cmd_rw_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
7044f1a0b26SJacek Kalwas 		   uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
7054f1a0b26SJacek Kalwas 		   struct spdk_nvme_ns_cmd_ext_io_opts *opts, enum spdk_nvme_nvm_opcode opc)
7064f1a0b26SJacek Kalwas {
7074f1a0b26SJacek Kalwas 	struct nvme_request *req;
7084f1a0b26SJacek Kalwas 	struct nvme_payload payload;
7094f1a0b26SJacek Kalwas 	void *seq;
7104f1a0b26SJacek Kalwas 	int rc = 0;
7114f1a0b26SJacek Kalwas 
7124f1a0b26SJacek Kalwas 	assert(opc == SPDK_NVME_OPC_READ || opc == SPDK_NVME_OPC_WRITE);
7134f1a0b26SJacek Kalwas 	assert(opts);
7144f1a0b26SJacek Kalwas 
7154f1a0b26SJacek Kalwas 	payload = NVME_PAYLOAD_CONTIG(buffer, opts->metadata);
7164f1a0b26SJacek Kalwas 
7174f1a0b26SJacek Kalwas 	if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
7184f1a0b26SJacek Kalwas 		return -EINVAL;
7194f1a0b26SJacek Kalwas 	}
7204f1a0b26SJacek Kalwas 
7214f1a0b26SJacek Kalwas 	seq = nvme_ns_cmd_get_ext_io_opt(opts, accel_sequence, NULL);
7224f1a0b26SJacek Kalwas 	if (spdk_unlikely(!_is_accel_sequence_valid(qpair, seq))) {
7234f1a0b26SJacek Kalwas 		return -EINVAL;
7244f1a0b26SJacek Kalwas 	}
7254f1a0b26SJacek Kalwas 
7264f1a0b26SJacek Kalwas 	payload.opts = opts;
7274f1a0b26SJacek Kalwas 
7284f1a0b26SJacek Kalwas 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, opc, opts->io_flags,
7294f1a0b26SJacek Kalwas 			      opts->apptag_mask, opts->apptag, 0, false, seq, &rc);
7304f1a0b26SJacek Kalwas 	if (spdk_unlikely(req == NULL)) {
7314f1a0b26SJacek Kalwas 		return nvme_ns_map_failure_rc(lba_count,
7324f1a0b26SJacek Kalwas 					      ns->sectors_per_max_io,
7334f1a0b26SJacek Kalwas 					      ns->sectors_per_stripe,
7344f1a0b26SJacek Kalwas 					      qpair->ctrlr->opts.io_queue_requests,
7354f1a0b26SJacek Kalwas 					      rc);
7364f1a0b26SJacek Kalwas 	}
7374f1a0b26SJacek Kalwas 
7384f1a0b26SJacek Kalwas 	return nvme_qpair_submit_request(qpair, req);
7394f1a0b26SJacek Kalwas }
7404f1a0b26SJacek Kalwas 
7414f1a0b26SJacek Kalwas int
7424f1a0b26SJacek Kalwas spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
7434f1a0b26SJacek Kalwas 			  uint64_t lba,
7444f1a0b26SJacek Kalwas 			  uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
7454f1a0b26SJacek Kalwas 			  struct spdk_nvme_ns_cmd_ext_io_opts *opts)
7464f1a0b26SJacek Kalwas {
7474f1a0b26SJacek Kalwas 	return nvme_ns_cmd_rw_ext(ns, qpair, buffer, lba, lba_count, cb_fn, cb_arg, opts,
7484f1a0b26SJacek Kalwas 				  SPDK_NVME_OPC_READ);
7494f1a0b26SJacek Kalwas }
7504f1a0b26SJacek Kalwas 
751d3d6d19bSChangpeng Liu int
7523272320cSDaniel Verkamp spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
7533272320cSDaniel Verkamp 		       uint64_t lba, uint32_t lba_count,
7546ce73aa6SDaniel Verkamp 		       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
7556ce73aa6SDaniel Verkamp 		       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
7566ce73aa6SDaniel Verkamp 		       spdk_nvme_req_next_sge_cb next_sge_fn)
757d3d6d19bSChangpeng Liu {
758d3d6d19bSChangpeng Liu 	struct nvme_request *req;
759407a5716SDaniel Verkamp 	struct nvme_payload payload;
760a1f848b0SAlexey Marchuk 	int rc = 0;
761d3d6d19bSChangpeng Liu 
762a7b6702dSTomasz Kulasek 	if (!_is_io_flags_valid(io_flags)) {
763a7b6702dSTomasz Kulasek 		return -EINVAL;
764a7b6702dSTomasz Kulasek 	}
765a7b6702dSTomasz Kulasek 
76659970a89SDaniel Verkamp 	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
767f0b20026SMinfei Huang 		return -EINVAL;
76859970a89SDaniel Verkamp 	}
769ee292e4bSLiang Yan 
770caf85d8fSDaniel Verkamp 	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
771407a5716SDaniel Verkamp 
772cd13f280SDaniel Verkamp 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
773dc8d4d8dSKonrad Sztyber 			      io_flags, 0, 0, 0, true, NULL, &rc);
7741010fb3aSDaniel Verkamp 	if (req != NULL) {
775eb555b13SDaniel Verkamp 		return nvme_qpair_submit_request(qpair, req);
776a1f848b0SAlexey Marchuk 	} else {
777a1f848b0SAlexey Marchuk 		return nvme_ns_map_failure_rc(lba_count,
778fbf06acaSPiotr Pelplinski 					      ns->sectors_per_max_io,
779fbf06acaSPiotr Pelplinski 					      ns->sectors_per_stripe,
780a1f848b0SAlexey Marchuk 					      qpair->ctrlr->opts.io_queue_requests,
781a1f848b0SAlexey Marchuk 					      rc);
7821010fb3aSDaniel Verkamp 	}
7831010fb3aSDaniel Verkamp }
7841010fb3aSDaniel Verkamp 
7851010fb3aSDaniel Verkamp int
786f52d8008SYoung Tack Jin spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
787f52d8008SYoung Tack Jin 			       uint64_t lba, uint32_t lba_count,
788f52d8008SYoung Tack Jin 			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
789f52d8008SYoung Tack Jin 			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
7905f1c1189SXiaodong Liu 			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
7915f1c1189SXiaodong Liu 			       uint16_t apptag_mask, uint16_t apptag)
792f52d8008SYoung Tack Jin {
793f52d8008SYoung Tack Jin 	struct nvme_request *req;
794f52d8008SYoung Tack Jin 	struct nvme_payload payload;
795a1f848b0SAlexey Marchuk 	int rc = 0;
796f52d8008SYoung Tack Jin 
797a7b6702dSTomasz Kulasek 	if (!_is_io_flags_valid(io_flags)) {
798a7b6702dSTomasz Kulasek 		return -EINVAL;
799a7b6702dSTomasz Kulasek 	}
800a7b6702dSTomasz Kulasek 
801f52d8008SYoung Tack Jin 	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
802f52d8008SYoung Tack Jin 		return -EINVAL;
803f52d8008SYoung Tack Jin 	}
804f52d8008SYoung Tack Jin 
805f52d8008SYoung Tack Jin 	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
806f52d8008SYoung Tack Jin 
807f52d8008SYoung Tack Jin 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
808dc8d4d8dSKonrad Sztyber 			      io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
809f52d8008SYoung Tack Jin 	if (req != NULL) {
810f52d8008SYoung Tack Jin 		return nvme_qpair_submit_request(qpair, req);
811a1f848b0SAlexey Marchuk 	} else {
812a1f848b0SAlexey Marchuk 		return nvme_ns_map_failure_rc(lba_count,
813f52d8008SYoung Tack Jin 					      ns->sectors_per_max_io,
814f52d8008SYoung Tack Jin 					      ns->sectors_per_stripe,
815a1f848b0SAlexey Marchuk 					      qpair->ctrlr->opts.io_queue_requests,
816a1f848b0SAlexey Marchuk 					      rc);
817f52d8008SYoung Tack Jin 	}
818f52d8008SYoung Tack Jin }
819f52d8008SYoung Tack Jin 
820ba58dd60SJacek Kalwas static int
821ba58dd60SJacek Kalwas nvme_ns_cmd_rwv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t lba,
822ba58dd60SJacek Kalwas 		    uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
823ba58dd60SJacek Kalwas 		    spdk_nvme_req_next_sge_cb next_sge_fn, struct spdk_nvme_ns_cmd_ext_io_opts *opts,
824ba58dd60SJacek Kalwas 		    enum spdk_nvme_nvm_opcode opc)
825110335f1SAlexey Marchuk {
826110335f1SAlexey Marchuk 	struct nvme_request *req;
827110335f1SAlexey Marchuk 	struct nvme_payload payload;
828dc8d4d8dSKonrad Sztyber 	void *seq;
829110335f1SAlexey Marchuk 	int rc = 0;
830110335f1SAlexey Marchuk 
831ba58dd60SJacek Kalwas 	assert(opc == SPDK_NVME_OPC_READ || opc == SPDK_NVME_OPC_WRITE);
832ba58dd60SJacek Kalwas 
833110335f1SAlexey Marchuk 	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
834110335f1SAlexey Marchuk 		return -EINVAL;
835110335f1SAlexey Marchuk 	}
836110335f1SAlexey Marchuk 
837110335f1SAlexey Marchuk 	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
838110335f1SAlexey Marchuk 
839110335f1SAlexey Marchuk 	if (opts) {
840110335f1SAlexey Marchuk 		if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
841110335f1SAlexey Marchuk 			return -EINVAL;
842110335f1SAlexey Marchuk 		}
843110335f1SAlexey Marchuk 
844dc8d4d8dSKonrad Sztyber 		seq = nvme_ns_cmd_get_ext_io_opt(opts, accel_sequence, NULL);
845dc8d4d8dSKonrad Sztyber 		if (spdk_unlikely(!_is_accel_sequence_valid(qpair, seq))) {
846dc8d4d8dSKonrad Sztyber 			return -EINVAL;
847dc8d4d8dSKonrad Sztyber 		}
848dc8d4d8dSKonrad Sztyber 
849110335f1SAlexey Marchuk 		payload.opts = opts;
850110335f1SAlexey Marchuk 		payload.md = opts->metadata;
851ba58dd60SJacek Kalwas 		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, opc, opts->io_flags,
852ba58dd60SJacek Kalwas 				      opts->apptag_mask, opts->apptag, opts->cdw13, true, seq, &rc);
853110335f1SAlexey Marchuk 
854110335f1SAlexey Marchuk 	} else {
855ba58dd60SJacek Kalwas 		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, opc, 0, 0, 0, 0,
856ba58dd60SJacek Kalwas 				      true, NULL, &rc);
857110335f1SAlexey Marchuk 	}
858110335f1SAlexey Marchuk 
859ba58dd60SJacek Kalwas 	if (req == NULL) {
860110335f1SAlexey Marchuk 		return nvme_ns_map_failure_rc(lba_count,
861110335f1SAlexey Marchuk 					      ns->sectors_per_max_io,
862110335f1SAlexey Marchuk 					      ns->sectors_per_stripe,
863110335f1SAlexey Marchuk 					      qpair->ctrlr->opts.io_queue_requests,
864110335f1SAlexey Marchuk 					      rc);
865110335f1SAlexey Marchuk 	}
866ba58dd60SJacek Kalwas 
867ba58dd60SJacek Kalwas 	return nvme_qpair_submit_request(qpair, req);
868ba58dd60SJacek Kalwas }
869ba58dd60SJacek Kalwas 
870ba58dd60SJacek Kalwas int
871ba58dd60SJacek Kalwas spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
872ba58dd60SJacek Kalwas 			   uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
873ba58dd60SJacek Kalwas 			   void *cb_arg, spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
874ba58dd60SJacek Kalwas 			   spdk_nvme_req_next_sge_cb next_sge_fn,
875ba58dd60SJacek Kalwas 			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
876ba58dd60SJacek Kalwas {
877ba58dd60SJacek Kalwas 	return nvme_ns_cmd_rwv_ext(ns, qpair, lba, lba_count, cb_fn, cb_arg, reset_sgl_fn, next_sge_fn,
878ba58dd60SJacek Kalwas 				   opts, SPDK_NVME_OPC_READ);
879110335f1SAlexey Marchuk }
880110335f1SAlexey Marchuk 
881110335f1SAlexey Marchuk int
8823272320cSDaniel Verkamp spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
8833272320cSDaniel Verkamp 		       void *buffer, uint64_t lba,
8846ce73aa6SDaniel Verkamp 		       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
88581f40464SBen Walker 		       uint32_t io_flags)
8861010fb3aSDaniel Verkamp {
8871010fb3aSDaniel Verkamp 	struct nvme_request *req;
888407a5716SDaniel Verkamp 	struct nvme_payload payload;
889a1f848b0SAlexey Marchuk 	int rc = 0;
8901010fb3aSDaniel Verkamp 
891a7b6702dSTomasz Kulasek 	if (!_is_io_flags_valid(io_flags)) {
892a7b6702dSTomasz Kulasek 		return -EINVAL;
893a7b6702dSTomasz Kulasek 	}
894a7b6702dSTomasz Kulasek 
895caf85d8fSDaniel Verkamp 	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
896407a5716SDaniel Verkamp 
897cd13f280SDaniel Verkamp 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
898dc8d4d8dSKonrad Sztyber 			      io_flags, 0, 0, 0, false, NULL, &rc);
899784182edSChangpeng Liu 	if (req != NULL) {
900784182edSChangpeng Liu 		return nvme_qpair_submit_request(qpair, req);
901a1f848b0SAlexey Marchuk 	} else {
902a1f848b0SAlexey Marchuk 		return nvme_ns_map_failure_rc(lba_count,
903fbf06acaSPiotr Pelplinski 					      ns->sectors_per_max_io,
904fbf06acaSPiotr Pelplinski 					      ns->sectors_per_stripe,
905a1f848b0SAlexey Marchuk 					      qpair->ctrlr->opts.io_queue_requests,
906a1f848b0SAlexey Marchuk 					      rc);
907784182edSChangpeng Liu 	}
908784182edSChangpeng Liu }
909784182edSChangpeng Liu 
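/*
 * Zone append commands cannot be split into child requests, so reject any append that the
 * controller could not take in a single transfer (i.e. larger than the zone append size limit)
 * before building the request.
 */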
910aa6767fbSNiklas Cassel static int
911aa6767fbSNiklas Cassel nvme_ns_cmd_check_zone_append(struct spdk_nvme_ns *ns, uint32_t lba_count, uint32_t io_flags)
912aa6767fbSNiklas Cassel {
913aa6767fbSNiklas Cassel 	uint32_t sector_size;
914aa6767fbSNiklas Cassel 
915aa6767fbSNiklas Cassel 	/* Not all NVMe Zoned Namespaces support the zone append command. */
916aa6767fbSNiklas Cassel 	if (!(ns->ctrlr->flags & SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED)) {
917aa6767fbSNiklas Cassel 		return -EINVAL;
918aa6767fbSNiklas Cassel 	}
919aa6767fbSNiklas Cassel 
920aa6767fbSNiklas Cassel 	sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
921aa6767fbSNiklas Cassel 
922aa6767fbSNiklas Cassel 	/* Fail a too large zone append command early. */
923aa6767fbSNiklas Cassel 	if (lba_count * sector_size > ns->ctrlr->max_zone_append_size) {
924aa6767fbSNiklas Cassel 		return -EINVAL;
925aa6767fbSNiklas Cassel 	}
926aa6767fbSNiklas Cassel 
927aa6767fbSNiklas Cassel 	return 0;
928aa6767fbSNiklas Cassel }
929aa6767fbSNiklas Cassel 
930aa6767fbSNiklas Cassel int
931aa6767fbSNiklas Cassel nvme_ns_cmd_zone_append_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
932aa6767fbSNiklas Cassel 				void *buffer, void *metadata, uint64_t zslba,
933aa6767fbSNiklas Cassel 				uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
934aa6767fbSNiklas Cassel 				uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
935aa6767fbSNiklas Cassel {
936aa6767fbSNiklas Cassel 	struct nvme_request *req;
937aa6767fbSNiklas Cassel 	struct nvme_payload payload;
938c7bb68aaSAlexey Marchuk 	int rc = 0;
939aa6767fbSNiklas Cassel 
940aa6767fbSNiklas Cassel 	if (!_is_io_flags_valid(io_flags)) {
941aa6767fbSNiklas Cassel 		return -EINVAL;
942aa6767fbSNiklas Cassel 	}
943aa6767fbSNiklas Cassel 
944c7bb68aaSAlexey Marchuk 	rc = nvme_ns_cmd_check_zone_append(ns, lba_count, io_flags);
945c7bb68aaSAlexey Marchuk 	if (rc) {
946c7bb68aaSAlexey Marchuk 		return rc;
947aa6767fbSNiklas Cassel 	}
948aa6767fbSNiklas Cassel 
949aa6767fbSNiklas Cassel 	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
950aa6767fbSNiklas Cassel 
951aa6767fbSNiklas Cassel 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
952aa6767fbSNiklas Cassel 			      SPDK_NVME_OPC_ZONE_APPEND,
953dc8d4d8dSKonrad Sztyber 			      io_flags, apptag_mask, apptag, 0, false, NULL, &rc);
954aa6767fbSNiklas Cassel 	if (req != NULL) {
955b05b3d0aSNiklas Cassel 		/*
956b05b3d0aSNiklas Cassel 		 * Zone append commands cannot be split (num_children has to be 0).
957b05b3d0aSNiklas Cassel 		 * For NVME_PAYLOAD_TYPE_CONTIG, _nvme_ns_cmd_rw() should never cause a split
958b05b3d0aSNiklas Cassel 		 * to happen, since a too large request would have already been failed by
959b05b3d0aSNiklas Cassel 		 * nvme_ns_cmd_check_zone_append(), since zasl <= mdts.
960b05b3d0aSNiklas Cassel 		 */
961b05b3d0aSNiklas Cassel 		assert(req->num_children == 0);
962b05b3d0aSNiklas Cassel 		if (req->num_children) {
963b05b3d0aSNiklas Cassel 			nvme_request_free_children(req);
964b05b3d0aSNiklas Cassel 			nvme_free_request(req);
965b05b3d0aSNiklas Cassel 			return -EINVAL;
966b05b3d0aSNiklas Cassel 		}
967aa6767fbSNiklas Cassel 		return nvme_qpair_submit_request(qpair, req);
968a1f848b0SAlexey Marchuk 	} else {
969a1f848b0SAlexey Marchuk 		return nvme_ns_map_failure_rc(lba_count,
970aa6767fbSNiklas Cassel 					      ns->sectors_per_max_io,
971aa6767fbSNiklas Cassel 					      ns->sectors_per_stripe,
972a1f848b0SAlexey Marchuk 					      qpair->ctrlr->opts.io_queue_requests,
973c7bb68aaSAlexey Marchuk 					      rc);
974aa6767fbSNiklas Cassel 	}
975aa6767fbSNiklas Cassel }
976aa6767fbSNiklas Cassel 
977784182edSChangpeng Liu int
978014baeb8SNiklas Cassel nvme_ns_cmd_zone_appendv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
979014baeb8SNiklas Cassel 				 uint64_t zslba, uint32_t lba_count,
980014baeb8SNiklas Cassel 				 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
981014baeb8SNiklas Cassel 				 spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
982014baeb8SNiklas Cassel 				 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
983014baeb8SNiklas Cassel 				 uint16_t apptag_mask, uint16_t apptag)
984014baeb8SNiklas Cassel {
985014baeb8SNiklas Cassel 	struct nvme_request *req;
986014baeb8SNiklas Cassel 	struct nvme_payload payload;
987c7bb68aaSAlexey Marchuk 	int rc = 0;
988014baeb8SNiklas Cassel 
989014baeb8SNiklas Cassel 	if (!_is_io_flags_valid(io_flags)) {
990014baeb8SNiklas Cassel 		return -EINVAL;
991014baeb8SNiklas Cassel 	}
992014baeb8SNiklas Cassel 
993014baeb8SNiklas Cassel 	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
994014baeb8SNiklas Cassel 		return -EINVAL;
995014baeb8SNiklas Cassel 	}
996014baeb8SNiklas Cassel 
997c7bb68aaSAlexey Marchuk 	rc = nvme_ns_cmd_check_zone_append(ns, lba_count, io_flags);
998c7bb68aaSAlexey Marchuk 	if (rc) {
999c7bb68aaSAlexey Marchuk 		return rc;
1000014baeb8SNiklas Cassel 	}
1001014baeb8SNiklas Cassel 
1002014baeb8SNiklas Cassel 	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
1003014baeb8SNiklas Cassel 
1004014baeb8SNiklas Cassel 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
1005014baeb8SNiklas Cassel 			      SPDK_NVME_OPC_ZONE_APPEND,
1006dc8d4d8dSKonrad Sztyber 			      io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
1007014baeb8SNiklas Cassel 	if (req != NULL) {
1008014baeb8SNiklas Cassel 		/*
1009014baeb8SNiklas Cassel 		 * Zone append commands cannot be split (num_children has to be 0).
1010014baeb8SNiklas Cassel 		 * For NVME_PAYLOAD_TYPE_SGL, _nvme_ns_cmd_rw() can cause a split.
1011014baeb8SNiklas Cassel 		 * However, _nvme_ns_cmd_split_request_sgl() and _nvme_ns_cmd_split_request_prp()
1012014baeb8SNiklas Cassel 		 * do not always split a request: they verify the payload size, verify that
1013014baeb8SNiklas Cassel 		 * the number of SGEs does not exceed max_sge, and verify the SGE alignment
1014014baeb8SNiklas Cassel 		 * rules (in the PRP case), splitting the request only if one of those checks
1015014baeb8SNiklas Cassel 		 * fails. In our case a split is very unlikely, because the size was already
1016014baeb8SNiklas Cassel 		 * verified by nvme_ns_cmd_check_zone_append(); however, these functions still
1017014baeb8SNiklas Cassel 		 * have to be called so that the verification itself is performed. If they do
1018014baeb8SNiklas Cassel 		 * cause a split, an error is returned here; a valid request is never split.
1019014baeb8SNiklas Cassel 		 */
1020014baeb8SNiklas Cassel 		if (req->num_children) {
1021014baeb8SNiklas Cassel 			nvme_request_free_children(req);
1022014baeb8SNiklas Cassel 			nvme_free_request(req);
1023014baeb8SNiklas Cassel 			return -EINVAL;
1024014baeb8SNiklas Cassel 		}
1025014baeb8SNiklas Cassel 		return nvme_qpair_submit_request(qpair, req);
1026a1f848b0SAlexey Marchuk 	} else {
1027a1f848b0SAlexey Marchuk 		return nvme_ns_map_failure_rc(lba_count,
1028014baeb8SNiklas Cassel 					      ns->sectors_per_max_io,
1029014baeb8SNiklas Cassel 					      ns->sectors_per_stripe,
1030a1f848b0SAlexey Marchuk 					      qpair->ctrlr->opts.io_queue_requests,
1031c7bb68aaSAlexey Marchuk 					      rc);
1032014baeb8SNiklas Cassel 	}
1033014baeb8SNiklas Cassel }
1034014baeb8SNiklas Cassel 
1035014baeb8SNiklas Cassel int
1036784182edSChangpeng Liu spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1037784182edSChangpeng Liu 			       void *buffer, void *metadata, uint64_t lba,
1038784182edSChangpeng Liu 			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1039784182edSChangpeng Liu 			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
1040784182edSChangpeng Liu {
1041784182edSChangpeng Liu 	struct nvme_request *req;
1042784182edSChangpeng Liu 	struct nvme_payload payload;
1043a1f848b0SAlexey Marchuk 	int rc = 0;
1044784182edSChangpeng Liu 
1045a7b6702dSTomasz Kulasek 	if (!_is_io_flags_valid(io_flags)) {
1046a7b6702dSTomasz Kulasek 		return -EINVAL;
1047a7b6702dSTomasz Kulasek 	}
1048a7b6702dSTomasz Kulasek 
1049caf85d8fSDaniel Verkamp 	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
1050784182edSChangpeng Liu 
1051cd13f280SDaniel Verkamp 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
1052dc8d4d8dSKonrad Sztyber 			      io_flags, apptag_mask, apptag, 0, false, NULL, &rc);
1053d3d6d19bSChangpeng Liu 	if (req != NULL) {
1054eb555b13SDaniel Verkamp 		return nvme_qpair_submit_request(qpair, req);
1055a1f848b0SAlexey Marchuk 	} else {
1056a1f848b0SAlexey Marchuk 		return nvme_ns_map_failure_rc(lba_count,
1057fbf06acaSPiotr Pelplinski 					      ns->sectors_per_max_io,
1058fbf06acaSPiotr Pelplinski 					      ns->sectors_per_stripe,
1059a1f848b0SAlexey Marchuk 					      qpair->ctrlr->opts.io_queue_requests,
1060a1f848b0SAlexey Marchuk 					      rc);
1061d3d6d19bSChangpeng Liu 	}
1062d3d6d19bSChangpeng Liu }
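
/*
 * Illustrative usage sketch (not part of the upstream source), assuming a namespace
 * formatted with a separate metadata buffer; the buffer names, LBA and protection-check
 * flag below are hypothetical, and both buffers must be DMA-able.
 *
 *	data = spdk_zmalloc(lba_count * spdk_nvme_ns_get_sector_size(ns), 0x1000,
 *			    NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
 *	md = spdk_zmalloc(lba_count * spdk_nvme_ns_get_md_size(ns), 0x1000,
 *			  NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
 *
 *	rc = spdk_nvme_ns_cmd_write_with_md(ns, qpair, data, md, lba, lba_count,
 *					    write_done, NULL,
 *					    SPDK_NVME_IO_FLAGS_PRCHK_GUARD,
 *					    0, 0);
 */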
1063d3d6d19bSChangpeng Liu 
1064d3d6d19bSChangpeng Liu int
10654f1a0b26SJacek Kalwas spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
10664f1a0b26SJacek Kalwas 			   void *buffer, uint64_t lba,
10674f1a0b26SJacek Kalwas 			   uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
10684f1a0b26SJacek Kalwas 			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
10694f1a0b26SJacek Kalwas {
10704f1a0b26SJacek Kalwas 	return nvme_ns_cmd_rw_ext(ns, qpair, buffer, lba, lba_count, cb_fn, cb_arg, opts,
10714f1a0b26SJacek Kalwas 				  SPDK_NVME_OPC_WRITE);
10724f1a0b26SJacek Kalwas }
10734f1a0b26SJacek Kalwas 
10744f1a0b26SJacek Kalwas int
10753272320cSDaniel Verkamp spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
10763272320cSDaniel Verkamp 			uint64_t lba, uint32_t lba_count,
10776ce73aa6SDaniel Verkamp 			spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
10786ce73aa6SDaniel Verkamp 			spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
10796ce73aa6SDaniel Verkamp 			spdk_nvme_req_next_sge_cb next_sge_fn)
1080d3d6d19bSChangpeng Liu {
1081d3d6d19bSChangpeng Liu 	struct nvme_request *req;
1082407a5716SDaniel Verkamp 	struct nvme_payload payload;
1083a1f848b0SAlexey Marchuk 	int rc = 0;
1084d3d6d19bSChangpeng Liu 
1085a7b6702dSTomasz Kulasek 	if (!_is_io_flags_valid(io_flags)) {
1086a7b6702dSTomasz Kulasek 		return -EINVAL;
1087a7b6702dSTomasz Kulasek 	}
1088a7b6702dSTomasz Kulasek 
108959970a89SDaniel Verkamp 	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
1090f0b20026SMinfei Huang 		return -EINVAL;
109159970a89SDaniel Verkamp 	}
1092ee292e4bSLiang Yan 
1093caf85d8fSDaniel Verkamp 	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
1094407a5716SDaniel Verkamp 
1095cd13f280SDaniel Verkamp 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
1096dc8d4d8dSKonrad Sztyber 			      io_flags, 0, 0, 0, true, NULL, &rc);
10971010fb3aSDaniel Verkamp 	if (req != NULL) {
1098eb555b13SDaniel Verkamp 		return nvme_qpair_submit_request(qpair, req);
1099a1f848b0SAlexey Marchuk 	} else {
1100a1f848b0SAlexey Marchuk 		return nvme_ns_map_failure_rc(lba_count,
1101fbf06acaSPiotr Pelplinski 					      ns->sectors_per_max_io,
1102fbf06acaSPiotr Pelplinski 					      ns->sectors_per_stripe,
1103a1f848b0SAlexey Marchuk 					      qpair->ctrlr->opts.io_queue_requests,
1104a1f848b0SAlexey Marchuk 					      rc);
11051010fb3aSDaniel Verkamp 	}
11061010fb3aSDaniel Verkamp }
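
/*
 * Illustrative usage sketch (not part of the upstream source): the two SGL callbacks
 * walk a caller-owned iovec array. Note that the cb_arg passed to
 * spdk_nvme_ns_cmd_writev() is handed to the SGL callbacks as well as to the completion
 * callback, so it must point at the SGL iteration state. All names below are hypothetical.
 *
 *	struct sgl_ctx { struct iovec *iov; int iovcnt; int idx; size_t offset; };
 *
 *	static void
 *	reset_sgl(void *arg, uint32_t sgl_offset)
 *	{
 *		struct sgl_ctx *ctx = arg;
 *
 *		ctx->idx = 0;
 *		ctx->offset = sgl_offset;
 *		while (ctx->idx < ctx->iovcnt && ctx->offset >= ctx->iov[ctx->idx].iov_len) {
 *			ctx->offset -= ctx->iov[ctx->idx++].iov_len;
 *		}
 *	}
 *
 *	static int
 *	next_sge(void *arg, void **address, uint32_t *length)
 *	{
 *		struct sgl_ctx *ctx = arg;
 *
 *		*address = (uint8_t *)ctx->iov[ctx->idx].iov_base + ctx->offset;
 *		*length = ctx->iov[ctx->idx].iov_len - ctx->offset;
 *		ctx->offset = 0;
 *		ctx->idx++;
 *		return 0;
 *	}
 *
 *	struct sgl_ctx ctx = { .iov = iov, .iovcnt = iovcnt };
 *
 *	rc = spdk_nvme_ns_cmd_writev(ns, qpair, lba, lba_count, write_done, &ctx,
 *				     0, reset_sgl, next_sge);
 */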
11071010fb3aSDaniel Verkamp 
11081010fb3aSDaniel Verkamp int
1109f52d8008SYoung Tack Jin spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1110f52d8008SYoung Tack Jin 				uint64_t lba, uint32_t lba_count,
1111f52d8008SYoung Tack Jin 				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
1112f52d8008SYoung Tack Jin 				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
11135f1c1189SXiaodong Liu 				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
11145f1c1189SXiaodong Liu 				uint16_t apptag_mask, uint16_t apptag)
1115f52d8008SYoung Tack Jin {
1116f52d8008SYoung Tack Jin 	struct nvme_request *req;
1117f52d8008SYoung Tack Jin 	struct nvme_payload payload;
1118a1f848b0SAlexey Marchuk 	int rc = 0;
1119f52d8008SYoung Tack Jin 
1120a7b6702dSTomasz Kulasek 	if (!_is_io_flags_valid(io_flags)) {
1121a7b6702dSTomasz Kulasek 		return -EINVAL;
1122a7b6702dSTomasz Kulasek 	}
1123a7b6702dSTomasz Kulasek 
1124f52d8008SYoung Tack Jin 	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
1125f52d8008SYoung Tack Jin 		return -EINVAL;
1126f52d8008SYoung Tack Jin 	}
1127f52d8008SYoung Tack Jin 
1128f52d8008SYoung Tack Jin 	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
1129f52d8008SYoung Tack Jin 
1130f52d8008SYoung Tack Jin 	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
1131dc8d4d8dSKonrad Sztyber 			      io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
1132f52d8008SYoung Tack Jin 	if (req != NULL) {
1133f52d8008SYoung Tack Jin 		return nvme_qpair_submit_request(qpair, req);
1134a1f848b0SAlexey Marchuk 	} else {
1135a1f848b0SAlexey Marchuk 		return nvme_ns_map_failure_rc(lba_count,
1136f52d8008SYoung Tack Jin 					      ns->sectors_per_max_io,
1137f52d8008SYoung Tack Jin 					      ns->sectors_per_stripe,
1138a1f848b0SAlexey Marchuk 					      qpair->ctrlr->opts.io_queue_requests,
1139a1f848b0SAlexey Marchuk 					      rc);
1140f52d8008SYoung Tack Jin 	}
1141f52d8008SYoung Tack Jin }
1142f52d8008SYoung Tack Jin 
1143f52d8008SYoung Tack Jin int
1144110335f1SAlexey Marchuk spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t lba,
1145110335f1SAlexey Marchuk 			    uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1146110335f1SAlexey Marchuk 			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
1147110335f1SAlexey Marchuk 			    spdk_nvme_req_next_sge_cb next_sge_fn,
1148110335f1SAlexey Marchuk 			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
1149110335f1SAlexey Marchuk {
1150ba58dd60SJacek Kalwas 	return nvme_ns_cmd_rwv_ext(ns, qpair, lba, lba_count, cb_fn, cb_arg, reset_sgl_fn, next_sge_fn,
1151ba58dd60SJacek Kalwas 				   opts, SPDK_NVME_OPC_WRITE);
1152110335f1SAlexey Marchuk }
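
/*
 * Illustrative usage sketch (not part of the upstream source): the extended variant
 * replaces the separate io_flags/apptag arguments with an spdk_nvme_ns_cmd_ext_io_opts
 * structure whose size field must be set so newer members can be detected. Only the
 * size and io_flags members are used below; the callbacks are the same hypothetical
 * ones as in the spdk_nvme_ns_cmd_writev() sketch above.
 *
 *	struct spdk_nvme_ns_cmd_ext_io_opts opts = {
 *		.size = sizeof(opts),
 *		.io_flags = 0,
 *	};
 *
 *	rc = spdk_nvme_ns_cmd_writev_ext(ns, qpair, lba, lba_count, write_done, &ctx,
 *					 reset_sgl, next_sge, &opts);
 */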
1153110335f1SAlexey Marchuk 
1154110335f1SAlexey Marchuk int
11553272320cSDaniel Verkamp spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
11563272320cSDaniel Verkamp 			      uint64_t lba, uint32_t lba_count,
11573272320cSDaniel Verkamp 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
115820c767e7SHaomai Wang 			      uint32_t io_flags)
115920c767e7SHaomai Wang {
116020c767e7SHaomai Wang 	struct nvme_request	*req;
1161ad35d6cdSDaniel Verkamp 	struct spdk_nvme_cmd	*cmd;
116220c767e7SHaomai Wang 	uint64_t		*tmp_lba;
116320c767e7SHaomai Wang 
1164a7b6702dSTomasz Kulasek 	if (!_is_io_flags_valid(io_flags)) {
1165a7b6702dSTomasz Kulasek 		return -EINVAL;
1166a7b6702dSTomasz Kulasek 	}
1167a7b6702dSTomasz Kulasek 
11688fbcc1e3SSeth Howell 	if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
1169f0b20026SMinfei Huang 		return -EINVAL;
117020c767e7SHaomai Wang 	}
117120c767e7SHaomai Wang 
1172cd13f280SDaniel Verkamp 	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
117320c767e7SHaomai Wang 	if (req == NULL) {
1174f0b20026SMinfei Huang 		return -ENOMEM;
117520c767e7SHaomai Wang 	}
117620c767e7SHaomai Wang 
117720c767e7SHaomai Wang 	cmd = &req->cmd;
1178ad35d6cdSDaniel Verkamp 	cmd->opc = SPDK_NVME_OPC_WRITE_ZEROES;
117920c767e7SHaomai Wang 	cmd->nsid = ns->id;
118020c767e7SHaomai Wang 
118120c767e7SHaomai Wang 	tmp_lba = (uint64_t *)&cmd->cdw10;
118220c767e7SHaomai Wang 	*tmp_lba = lba;
118320c767e7SHaomai Wang 	cmd->cdw12 = lba_count - 1;
11841c5d9803STomasz Kulasek 	cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
11851c5d9803STomasz Kulasek 	cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);
118620c767e7SHaomai Wang 
1187eb555b13SDaniel Verkamp 	return nvme_qpair_submit_request(qpair, req);
118820c767e7SHaomai Wang }
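
/*
 * Illustrative usage sketch (not part of the upstream source): Write Zeroes carries no
 * data payload, and NLB in CDW12 is a 0-based 16-bit field, which is why lba_count is
 * capped at 65536 above. The names below are hypothetical.
 *
 *	rc = spdk_nvme_ns_cmd_write_zeroes(ns, qpair, lba, 8, wz_done, NULL, 0);
 *	if (rc == -EINVAL) {
 *		// lba_count was 0 or larger than 65536, or io_flags were invalid
 *	}
 */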
118920c767e7SHaomai Wang 
119020c767e7SHaomai Wang int
1191eda407a6SZiv Hirsch spdk_nvme_ns_cmd_verify(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1192eda407a6SZiv Hirsch 			uint64_t lba, uint32_t lba_count,
1193eda407a6SZiv Hirsch 			spdk_nvme_cmd_cb cb_fn, void *cb_arg,
1194eda407a6SZiv Hirsch 			uint32_t io_flags)
1195eda407a6SZiv Hirsch {
1196eda407a6SZiv Hirsch 	struct nvme_request	*req;
1197eda407a6SZiv Hirsch 	struct spdk_nvme_cmd	*cmd;
1198eda407a6SZiv Hirsch 
1199eda407a6SZiv Hirsch 	if (!_is_io_flags_valid(io_flags)) {
1200eda407a6SZiv Hirsch 		return -EINVAL;
1201eda407a6SZiv Hirsch 	}
1202eda407a6SZiv Hirsch 
1203eda407a6SZiv Hirsch 	if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
1204eda407a6SZiv Hirsch 		return -EINVAL;
1205eda407a6SZiv Hirsch 	}
1206eda407a6SZiv Hirsch 
1207eda407a6SZiv Hirsch 	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
1208eda407a6SZiv Hirsch 	if (req == NULL) {
1209eda407a6SZiv Hirsch 		return -ENOMEM;
1210eda407a6SZiv Hirsch 	}
1211eda407a6SZiv Hirsch 
1212eda407a6SZiv Hirsch 	cmd = &req->cmd;
1213eda407a6SZiv Hirsch 	cmd->opc = SPDK_NVME_OPC_VERIFY;
1214eda407a6SZiv Hirsch 	cmd->nsid = ns->id;
1215eda407a6SZiv Hirsch 
1216eda407a6SZiv Hirsch 	*(uint64_t *)&cmd->cdw10 = lba;
1217eda407a6SZiv Hirsch 	cmd->cdw12 = lba_count - 1;
1218eda407a6SZiv Hirsch 	cmd->fuse = (io_flags & SPDK_NVME_IO_FLAGS_FUSE_MASK);
1219eda407a6SZiv Hirsch 	cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);
1220eda407a6SZiv Hirsch 
1221eda407a6SZiv Hirsch 	return nvme_qpair_submit_request(qpair, req);
1222eda407a6SZiv Hirsch }
1223eda407a6SZiv Hirsch 
1224eda407a6SZiv Hirsch int
12256bcd3588SBenjamin Saunders spdk_nvme_ns_cmd_write_uncorrectable(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
12266bcd3588SBenjamin Saunders 				     uint64_t lba, uint32_t lba_count,
12276bcd3588SBenjamin Saunders 				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
12286bcd3588SBenjamin Saunders {
12296bcd3588SBenjamin Saunders 	struct nvme_request	*req;
12306bcd3588SBenjamin Saunders 	struct spdk_nvme_cmd	*cmd;
12316bcd3588SBenjamin Saunders 	uint64_t		*tmp_lba;
12326bcd3588SBenjamin Saunders 
12336bcd3588SBenjamin Saunders 	if (lba_count == 0 || lba_count > UINT16_MAX + 1) {
12346bcd3588SBenjamin Saunders 		return -EINVAL;
12356bcd3588SBenjamin Saunders 	}
12366bcd3588SBenjamin Saunders 
12376bcd3588SBenjamin Saunders 	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
12386bcd3588SBenjamin Saunders 	if (req == NULL) {
12396bcd3588SBenjamin Saunders 		return -ENOMEM;
12406bcd3588SBenjamin Saunders 	}
12416bcd3588SBenjamin Saunders 
12426bcd3588SBenjamin Saunders 	cmd = &req->cmd;
12436bcd3588SBenjamin Saunders 	cmd->opc = SPDK_NVME_OPC_WRITE_UNCORRECTABLE;
12446bcd3588SBenjamin Saunders 	cmd->nsid = ns->id;
12456bcd3588SBenjamin Saunders 
12466bcd3588SBenjamin Saunders 	tmp_lba = (uint64_t *)&cmd->cdw10;
12476bcd3588SBenjamin Saunders 	*tmp_lba = lba;
12486bcd3588SBenjamin Saunders 	cmd->cdw12 = lba_count - 1;
12496bcd3588SBenjamin Saunders 
12506bcd3588SBenjamin Saunders 	return nvme_qpair_submit_request(qpair, req);
12516bcd3588SBenjamin Saunders }
12526bcd3588SBenjamin Saunders 
12536bcd3588SBenjamin Saunders int
1254f4140ad0SBen Walker spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
1255f4140ad0SBen Walker 				    uint32_t type,
1256f4140ad0SBen Walker 				    const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
1257f4140ad0SBen Walker 				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
12581010fb3aSDaniel Verkamp {
12591010fb3aSDaniel Verkamp 	struct nvme_request	*req;
1260ad35d6cdSDaniel Verkamp 	struct spdk_nvme_cmd	*cmd;
12611010fb3aSDaniel Verkamp 
1262ad35d6cdSDaniel Verkamp 	if (num_ranges == 0 || num_ranges > SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES) {
1263f0b20026SMinfei Huang 		return -EINVAL;
12641010fb3aSDaniel Verkamp 	}
12651010fb3aSDaniel Verkamp 
1266f4140ad0SBen Walker 	if (ranges == NULL) {
1267f4140ad0SBen Walker 		return -EINVAL;
1268f4140ad0SBen Walker 	}
1269f4140ad0SBen Walker 
1270cd13f280SDaniel Verkamp 	req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
1271ad35d6cdSDaniel Verkamp 					      num_ranges * sizeof(struct spdk_nvme_dsm_range),
1272f4140ad0SBen Walker 					      cb_fn, cb_arg, true);
12731010fb3aSDaniel Verkamp 	if (req == NULL) {
1274f0b20026SMinfei Huang 		return -ENOMEM;
12751010fb3aSDaniel Verkamp 	}
12761010fb3aSDaniel Verkamp 
12771010fb3aSDaniel Verkamp 	cmd = &req->cmd;
1278ad35d6cdSDaniel Verkamp 	cmd->opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
12791010fb3aSDaniel Verkamp 	cmd->nsid = ns->id;
12801010fb3aSDaniel Verkamp 
12811fea1fccSChangpeng Liu 	cmd->cdw10_bits.dsm.nr = num_ranges - 1;
1282f4140ad0SBen Walker 	cmd->cdw11 = type;
12831010fb3aSDaniel Verkamp 
1284eb555b13SDaniel Verkamp 	return nvme_qpair_submit_request(qpair, req);
12851010fb3aSDaniel Verkamp }
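
/*
 * Illustrative usage sketch (not part of the upstream source): deallocating (trimming)
 * two LBA ranges. The ranges array is copied into a driver-owned buffer by
 * nvme_allocate_request_user_copy(), so it may live on the caller's stack. The LBAs and
 * lengths below are hypothetical.
 *
 *	struct spdk_nvme_dsm_range ranges[2] = {
 *		{ .starting_lba = 0,    .length = 256 },
 *		{ .starting_lba = 1024, .length = 256 },
 *	};
 *
 *	rc = spdk_nvme_ns_cmd_dataset_management(ns, qpair,
 *						 SPDK_NVME_DSM_ATTR_DEALLOCATE,
 *						 ranges, 2, dsm_done, NULL);
 */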
12861010fb3aSDaniel Verkamp 
12871010fb3aSDaniel Verkamp int
12887a12f481SKrishna Kanth Reddy spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
12897a12f481SKrishna Kanth Reddy 		      const struct spdk_nvme_scc_source_range *ranges,
12907a12f481SKrishna Kanth Reddy 		      uint16_t num_ranges, uint64_t dest_lba,
12917a12f481SKrishna Kanth Reddy 		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
12927a12f481SKrishna Kanth Reddy {
12937a12f481SKrishna Kanth Reddy 	struct nvme_request	*req;
12947a12f481SKrishna Kanth Reddy 	struct spdk_nvme_cmd	*cmd;
12957a12f481SKrishna Kanth Reddy 
12967a12f481SKrishna Kanth Reddy 	if (num_ranges == 0) {
12977a12f481SKrishna Kanth Reddy 		return -EINVAL;
12987a12f481SKrishna Kanth Reddy 	}
12997a12f481SKrishna Kanth Reddy 
13007a12f481SKrishna Kanth Reddy 	if (ranges == NULL) {
13017a12f481SKrishna Kanth Reddy 		return -EINVAL;
13027a12f481SKrishna Kanth Reddy 	}
13037a12f481SKrishna Kanth Reddy 
13047a12f481SKrishna Kanth Reddy 	req = nvme_allocate_request_user_copy(qpair, (void *)ranges,
13057a12f481SKrishna Kanth Reddy 					      num_ranges * sizeof(struct spdk_nvme_scc_source_range),
13067a12f481SKrishna Kanth Reddy 					      cb_fn, cb_arg, true);
13077a12f481SKrishna Kanth Reddy 	if (req == NULL) {
13087a12f481SKrishna Kanth Reddy 		return -ENOMEM;
13097a12f481SKrishna Kanth Reddy 	}
13107a12f481SKrishna Kanth Reddy 
13117a12f481SKrishna Kanth Reddy 	cmd = &req->cmd;
13127a12f481SKrishna Kanth Reddy 	cmd->opc = SPDK_NVME_OPC_COPY;
13137a12f481SKrishna Kanth Reddy 	cmd->nsid = ns->id;
13147a12f481SKrishna Kanth Reddy 
13157a12f481SKrishna Kanth Reddy 	*(uint64_t *)&cmd->cdw10 = dest_lba;
13167a12f481SKrishna Kanth Reddy 	cmd->cdw12 = num_ranges - 1;
13177a12f481SKrishna Kanth Reddy 
13187a12f481SKrishna Kanth Reddy 	return nvme_qpair_submit_request(qpair, req);
13197a12f481SKrishna Kanth Reddy }
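
/*
 * Illustrative usage sketch (not part of the upstream source): a Simple Copy of one
 * source range to dest_lba. The nlb value in the source-range descriptor is 0-based per
 * the NVMe spec; the field names used below (slba, nlb) follow that descriptor layout
 * and should be treated as assumptions.
 *
 *	struct spdk_nvme_scc_source_range range = {
 *		.slba = 0,
 *		.nlb = 127,	// copies 128 blocks
 *	};
 *
 *	rc = spdk_nvme_ns_cmd_copy(ns, qpair, &range, 1, dest_lba, copy_done, NULL);
 */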
13207a12f481SKrishna Kanth Reddy 
13217a12f481SKrishna Kanth Reddy int
13223272320cSDaniel Verkamp spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
13233272320cSDaniel Verkamp 		       spdk_nvme_cmd_cb cb_fn, void *cb_arg)
13241010fb3aSDaniel Verkamp {
13251010fb3aSDaniel Verkamp 	struct nvme_request	*req;
1326ad35d6cdSDaniel Verkamp 	struct spdk_nvme_cmd	*cmd;
13271010fb3aSDaniel Verkamp 
1328cd13f280SDaniel Verkamp 	req = nvme_allocate_request_null(qpair, cb_fn, cb_arg);
13291010fb3aSDaniel Verkamp 	if (req == NULL) {
1330f0b20026SMinfei Huang 		return -ENOMEM;
13311010fb3aSDaniel Verkamp 	}
13321010fb3aSDaniel Verkamp 
13331010fb3aSDaniel Verkamp 	cmd = &req->cmd;
1334ad35d6cdSDaniel Verkamp 	cmd->opc = SPDK_NVME_OPC_FLUSH;
13351010fb3aSDaniel Verkamp 	cmd->nsid = ns->id;
13361010fb3aSDaniel Verkamp 
1337eb555b13SDaniel Verkamp 	return nvme_qpair_submit_request(qpair, req);
13381010fb3aSDaniel Verkamp }
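
/*
 * Illustrative usage sketch (not part of the upstream source): Flush takes no payload
 * and completes once previously written data has been committed to non-volatile media.
 * The polling loop and names below are hypothetical.
 *
 *	static void
 *	flush_done(void *arg, const struct spdk_nvme_cpl *cpl)
 *	{
 *		*(bool *)arg = true;
 *	}
 *
 *	bool done = false;
 *
 *	rc = spdk_nvme_ns_cmd_flush(ns, qpair, flush_done, &done);
 *	while (rc == 0 && !done) {
 *		spdk_nvme_qpair_process_completions(qpair, 0);
 *	}
 */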
133992fa3ec5SChangpeng Liu 
134092fa3ec5SChangpeng Liu int
13416ce73aa6SDaniel Verkamp spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
13423272320cSDaniel Verkamp 				      struct spdk_nvme_qpair *qpair,
1343ad35d6cdSDaniel Verkamp 				      struct spdk_nvme_reservation_register_data *payload,
134492fa3ec5SChangpeng Liu 				      bool ignore_key,
1345ad35d6cdSDaniel Verkamp 				      enum spdk_nvme_reservation_register_action action,
1346ad35d6cdSDaniel Verkamp 				      enum spdk_nvme_reservation_register_cptpl cptpl,
13476ce73aa6SDaniel Verkamp 				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
134892fa3ec5SChangpeng Liu {
134992fa3ec5SChangpeng Liu 	struct nvme_request	*req;
1350ad35d6cdSDaniel Verkamp 	struct spdk_nvme_cmd	*cmd;
135192fa3ec5SChangpeng Liu 
1352cd13f280SDaniel Verkamp 	req = nvme_allocate_request_user_copy(qpair,
1353cd13f280SDaniel Verkamp 					      payload, sizeof(struct spdk_nvme_reservation_register_data),
13545e9d8593SDaniel Verkamp 					      cb_fn, cb_arg, true);
135592fa3ec5SChangpeng Liu 	if (req == NULL) {
1356f0b20026SMinfei Huang 		return -ENOMEM;
135792fa3ec5SChangpeng Liu 	}
135892fa3ec5SChangpeng Liu 
135992fa3ec5SChangpeng Liu 	cmd = &req->cmd;
1360ad35d6cdSDaniel Verkamp 	cmd->opc = SPDK_NVME_OPC_RESERVATION_REGISTER;
136192fa3ec5SChangpeng Liu 	cmd->nsid = ns->id;
136292fa3ec5SChangpeng Liu 
13631fea1fccSChangpeng Liu 	cmd->cdw10_bits.resv_register.rrega = action;
13641fea1fccSChangpeng Liu 	cmd->cdw10_bits.resv_register.iekey = ignore_key;
13651fea1fccSChangpeng Liu 	cmd->cdw10_bits.resv_register.cptpl = cptpl;
136692fa3ec5SChangpeng Liu 
1367eb555b13SDaniel Verkamp 	return nvme_qpair_submit_request(qpair, req);
136892fa3ec5SChangpeng Liu }
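
/*
 * Illustrative usage sketch (not part of the upstream source): registering a new
 * reservation key on a namespace. The key value is hypothetical, and the action/cptpl
 * enum names below are the ones expected in spdk/nvme_spec.h.
 *
 *	struct spdk_nvme_reservation_register_data rr_data = {
 *		.crkey = 0,
 *		.nrkey = 0xDEADBEEF,
 *	};
 *
 *	rc = spdk_nvme_ns_cmd_reservation_register(ns, qpair, &rr_data, true,
 *						   SPDK_NVME_RESERVE_REGISTER_KEY,
 *						   SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
 *						   resv_done, NULL);
 */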
136992fa3ec5SChangpeng Liu 
137092fa3ec5SChangpeng Liu int
13716ce73aa6SDaniel Verkamp spdk_nvme_ns_cmd_reservation_release(struct spdk_nvme_ns *ns,
13723272320cSDaniel Verkamp 				     struct spdk_nvme_qpair *qpair,
1373ad35d6cdSDaniel Verkamp 				     struct spdk_nvme_reservation_key_data *payload,
137492fa3ec5SChangpeng Liu 				     bool ignore_key,
1375ad35d6cdSDaniel Verkamp 				     enum spdk_nvme_reservation_release_action action,
1376ad35d6cdSDaniel Verkamp 				     enum spdk_nvme_reservation_type type,
13776ce73aa6SDaniel Verkamp 				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
137892fa3ec5SChangpeng Liu {
137992fa3ec5SChangpeng Liu 	struct nvme_request	*req;
1380ad35d6cdSDaniel Verkamp 	struct spdk_nvme_cmd	*cmd;
138192fa3ec5SChangpeng Liu 
1382cd13f280SDaniel Verkamp 	req = nvme_allocate_request_user_copy(qpair,
1383cd13f280SDaniel Verkamp 					      payload, sizeof(struct spdk_nvme_reservation_key_data), cb_fn,
13845e9d8593SDaniel Verkamp 					      cb_arg, true);
138592fa3ec5SChangpeng Liu 	if (req == NULL) {
1386f0b20026SMinfei Huang 		return -ENOMEM;
138792fa3ec5SChangpeng Liu 	}
138892fa3ec5SChangpeng Liu 
138992fa3ec5SChangpeng Liu 	cmd = &req->cmd;
1390ad35d6cdSDaniel Verkamp 	cmd->opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
139192fa3ec5SChangpeng Liu 	cmd->nsid = ns->id;
139292fa3ec5SChangpeng Liu 
13931fea1fccSChangpeng Liu 	cmd->cdw10_bits.resv_release.rrela = action;
13941fea1fccSChangpeng Liu 	cmd->cdw10_bits.resv_release.iekey = ignore_key;
13951fea1fccSChangpeng Liu 	cmd->cdw10_bits.resv_release.rtype = type;
139692fa3ec5SChangpeng Liu 
1397eb555b13SDaniel Verkamp 	return nvme_qpair_submit_request(qpair, req);
139892fa3ec5SChangpeng Liu }
139992fa3ec5SChangpeng Liu 
140092fa3ec5SChangpeng Liu int
14016ce73aa6SDaniel Verkamp spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
14023272320cSDaniel Verkamp 				     struct spdk_nvme_qpair *qpair,
1403ad35d6cdSDaniel Verkamp 				     struct spdk_nvme_reservation_acquire_data *payload,
140492fa3ec5SChangpeng Liu 				     bool ignore_key,
1405ad35d6cdSDaniel Verkamp 				     enum spdk_nvme_reservation_acquire_action action,
1406ad35d6cdSDaniel Verkamp 				     enum spdk_nvme_reservation_type type,
14076ce73aa6SDaniel Verkamp 				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
140892fa3ec5SChangpeng Liu {
140992fa3ec5SChangpeng Liu 	struct nvme_request	*req;
1410ad35d6cdSDaniel Verkamp 	struct spdk_nvme_cmd	*cmd;
141192fa3ec5SChangpeng Liu 
1412cd13f280SDaniel Verkamp 	req = nvme_allocate_request_user_copy(qpair,
1413cd13f280SDaniel Verkamp 					      payload, sizeof(struct spdk_nvme_reservation_acquire_data),
14145e9d8593SDaniel Verkamp 					      cb_fn, cb_arg, true);
141592fa3ec5SChangpeng Liu 	if (req == NULL) {
1416f0b20026SMinfei Huang 		return -ENOMEM;
141792fa3ec5SChangpeng Liu 	}
141892fa3ec5SChangpeng Liu 
141992fa3ec5SChangpeng Liu 	cmd = &req->cmd;
1420ad35d6cdSDaniel Verkamp 	cmd->opc = SPDK_NVME_OPC_RESERVATION_ACQUIRE;
142192fa3ec5SChangpeng Liu 	cmd->nsid = ns->id;
142292fa3ec5SChangpeng Liu 
14231fea1fccSChangpeng Liu 	cmd->cdw10_bits.resv_acquire.racqa = action;
14241fea1fccSChangpeng Liu 	cmd->cdw10_bits.resv_acquire.iekey = ignore_key;
14251fea1fccSChangpeng Liu 	cmd->cdw10_bits.resv_acquire.rtype = type;
142692fa3ec5SChangpeng Liu 
1427eb555b13SDaniel Verkamp 	return nvme_qpair_submit_request(qpair, req);
142892fa3ec5SChangpeng Liu }
142992fa3ec5SChangpeng Liu 
143092fa3ec5SChangpeng Liu int
14313272320cSDaniel Verkamp spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns,
14323272320cSDaniel Verkamp 				    struct spdk_nvme_qpair *qpair,
14333272320cSDaniel Verkamp 				    void *payload, uint32_t len,
14343272320cSDaniel Verkamp 				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
143592fa3ec5SChangpeng Liu {
143692fa3ec5SChangpeng Liu 	uint32_t		num_dwords;
143792fa3ec5SChangpeng Liu 	struct nvme_request	*req;
1438ad35d6cdSDaniel Verkamp 	struct spdk_nvme_cmd	*cmd;
143992fa3ec5SChangpeng Liu 
144082c61e06SAnkit Kumar 	if (len & 0x3) {
1441f0b20026SMinfei Huang 		return -EINVAL;
144259970a89SDaniel Verkamp 	}
144392fa3ec5SChangpeng Liu 
1444cd13f280SDaniel Verkamp 	req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
144592fa3ec5SChangpeng Liu 	if (req == NULL) {
1446f0b20026SMinfei Huang 		return -ENOMEM;
144792fa3ec5SChangpeng Liu 	}
144892fa3ec5SChangpeng Liu 
144992fa3ec5SChangpeng Liu 	cmd = &req->cmd;
1450ad35d6cdSDaniel Verkamp 	cmd->opc = SPDK_NVME_OPC_RESERVATION_REPORT;
145192fa3ec5SChangpeng Liu 	cmd->nsid = ns->id;
145292fa3ec5SChangpeng Liu 
145382c61e06SAnkit Kumar 	num_dwords = (len >> 2);
145482c61e06SAnkit Kumar 	cmd->cdw10 = num_dwords - 1; /* 0-based */
145592fa3ec5SChangpeng Liu 
1456eb555b13SDaniel Verkamp 	return nvme_qpair_submit_request(qpair, req);
145792fa3ec5SChangpeng Liu }
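
/*
 * Illustrative usage sketch (not part of the upstream source): len must be a multiple
 * of 4, since the command encodes it as a 0-based dword count. The payload is copied
 * to/from a driver-owned buffer, so an ordinary buffer works, but it must remain valid
 * until the completion callback runs. Names below are hypothetical.
 *
 *	uint8_t report[0x1000];
 *
 *	rc = spdk_nvme_ns_cmd_reservation_report(ns, qpair, report, sizeof(report),
 *						 report_done, NULL);
 *	// On completion, report begins with a reservation status data header
 *	// (struct spdk_nvme_reservation_status_data in spdk/nvme_spec.h).
 */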
14589a1457ffSAnkit Kumar 
14599a1457ffSAnkit Kumar int
14609a1457ffSAnkit Kumar spdk_nvme_ns_cmd_io_mgmt_recv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
14619a1457ffSAnkit Kumar 			      void *payload, uint32_t len, uint8_t mo, uint16_t mos,
14629a1457ffSAnkit Kumar 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
14639a1457ffSAnkit Kumar {
14649a1457ffSAnkit Kumar 	uint32_t		num_dwords;
14659a1457ffSAnkit Kumar 	struct nvme_request	*req;
14669a1457ffSAnkit Kumar 	struct spdk_nvme_cmd	*cmd;
14679a1457ffSAnkit Kumar 
14689a1457ffSAnkit Kumar 	if (len & 0x3) {
14699a1457ffSAnkit Kumar 		return -EINVAL;
14709a1457ffSAnkit Kumar 	}
14719a1457ffSAnkit Kumar 
14729a1457ffSAnkit Kumar 	req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
14739a1457ffSAnkit Kumar 	if (req == NULL) {
14749a1457ffSAnkit Kumar 		return -ENOMEM;
14759a1457ffSAnkit Kumar 	}
14769a1457ffSAnkit Kumar 
14779a1457ffSAnkit Kumar 	cmd = &req->cmd;
14789a1457ffSAnkit Kumar 	cmd->opc = SPDK_NVME_OPC_IO_MANAGEMENT_RECEIVE;
14799a1457ffSAnkit Kumar 	cmd->nsid = ns->id;
14809a1457ffSAnkit Kumar 
14819a1457ffSAnkit Kumar 	cmd->cdw10_bits.mgmt_send_recv.mo = mo;
14829a1457ffSAnkit Kumar 	cmd->cdw10_bits.mgmt_send_recv.mos = mos;
14839a1457ffSAnkit Kumar 
14849a1457ffSAnkit Kumar 	num_dwords = (len >> 2);
14859a1457ffSAnkit Kumar 	cmd->cdw11 = num_dwords - 1; /* 0-based */
14869a1457ffSAnkit Kumar 
14879a1457ffSAnkit Kumar 	return nvme_qpair_submit_request(qpair, req);
14889a1457ffSAnkit Kumar }
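
/*
 * Illustrative usage sketch (not part of the upstream source): I/O Management Receive
 * returns a management-operation-specific data structure, e.g. the reclaim unit handle
 * status used by FDP. The mo/mos values below are placeholders; take them from the NVMe
 * specification for the operation in use.
 *
 *	uint8_t buf[4096];
 *
 *	rc = spdk_nvme_ns_cmd_io_mgmt_recv(ns, qpair, buf, sizeof(buf),
 *					   mo, mos, mgmt_done, NULL);
 *	if (rc == -EINVAL) {
 *		// len was not a multiple of 4
 *	}
 */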
14899a1457ffSAnkit Kumar 
14909a1457ffSAnkit Kumar int
14919a1457ffSAnkit Kumar spdk_nvme_ns_cmd_io_mgmt_send(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
14929a1457ffSAnkit Kumar 			      void *payload, uint32_t len, uint8_t mo, uint16_t mos,
14939a1457ffSAnkit Kumar 			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
14949a1457ffSAnkit Kumar {
14959a1457ffSAnkit Kumar 	struct nvme_request	*req;
14969a1457ffSAnkit Kumar 	struct spdk_nvme_cmd	*cmd;
14979a1457ffSAnkit Kumar 
14989a1457ffSAnkit Kumar 	req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
14999a1457ffSAnkit Kumar 	if (req == NULL) {
15009a1457ffSAnkit Kumar 		return -ENOMEM;
15019a1457ffSAnkit Kumar 	}
15029a1457ffSAnkit Kumar 
15039a1457ffSAnkit Kumar 	cmd = &req->cmd;
15049a1457ffSAnkit Kumar 	cmd->opc = SPDK_NVME_OPC_IO_MANAGEMENT_SEND;
15059a1457ffSAnkit Kumar 	cmd->nsid = ns->id;
15069a1457ffSAnkit Kumar 
15079a1457ffSAnkit Kumar 	cmd->cdw10_bits.mgmt_send_recv.mo = mo;
15089a1457ffSAnkit Kumar 	cmd->cdw10_bits.mgmt_send_recv.mos = mos;
15099a1457ffSAnkit Kumar 
15109a1457ffSAnkit Kumar 	return nvme_qpair_submit_request(qpair, req);
15119a1457ffSAnkit Kumar }
1512