/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"

#include "spdk/bdev.h"
#include "spdk/endian.h"
#include "spdk/thread.h"
#include "spdk/likely.h"
#include "spdk/nvme.h"
#include "spdk/nvmf_spec.h"
#include "spdk/trace.h"
#include "spdk/scsi_spec.h"
#include "spdk/string.h"
#include "spdk/util.h"

#include "spdk_internal/log.h"

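/*
 * Check whether every bdev-backed namespace in the subsystem supports the given
 * bdev I/O type. Namespaces without a backing bdev are skipped.
 */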
static bool
spdk_nvmf_subsystem_bdev_io_type_supported(struct spdk_nvmf_subsystem *subsystem,
		enum spdk_bdev_io_type io_type)
{
	struct spdk_nvmf_ns *ns;

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		if (ns->bdev == NULL) {
			continue;
		}

		if (!spdk_bdev_io_type_supported(ns->bdev, io_type)) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF,
				      "Subsystem %s namespace %u (%s) does not support io_type %d\n",
				      spdk_nvmf_subsystem_get_nqn(subsystem),
				      ns->opts.nsid, spdk_bdev_get_name(ns->bdev), (int)io_type);
			return false;
		}
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "All devices in Subsystem %s support io_type %d\n",
		      spdk_nvmf_subsystem_get_nqn(subsystem), (int)io_type);
	return true;
}

bool
spdk_nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return spdk_nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_UNMAP);
}

bool
spdk_nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return spdk_nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
}

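/*
 * Generic bdev I/O completion callback: copy the bdev's NVMe status into the
 * NVMe-oF completion, complete the request, and release the bdev_io.
 */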
static void
nvmf_bdev_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success,
			     void *cb_arg)
{
	struct spdk_nvmf_request	*req = cb_arg;
	struct spdk_nvme_cpl		*response = &req->rsp->nvme_cpl;
	int				sc, sct;

	spdk_bdev_io_get_nvme_status(bdev_io, &sct, &sc);
	response->status.sc = sc;
	response->status.sct = sct;

	spdk_nvmf_request_complete(req);
	spdk_bdev_free_io(bdev_io);
}

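/*
 * Fill in the Identify Namespace data structure from the backing bdev:
 * capacity, LBA format, end-to-end protection settings, optimal I/O boundary,
 * reservation capabilities, and the NGUID/EUI64 identifiers.
 */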
void
spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata)
{
	struct spdk_bdev *bdev = ns->bdev;
	uint64_t num_blocks;

	num_blocks = spdk_bdev_get_num_blocks(bdev);

	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].ms = spdk_bdev_get_md_size(bdev);
	nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_block_size(bdev));
	if (nsdata->lbaf[0].ms != 0) {
		nsdata->flbas.extended = 1;
		nsdata->mc.extended = 1;
		nsdata->mc.pointer = 0;
		nsdata->dps.md_start = spdk_bdev_is_dif_head_of_md(bdev);

		switch (spdk_bdev_get_dif_type(bdev)) {
		case SPDK_DIF_TYPE1:
			nsdata->dpc.pit1 = 1;
			nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE1;
			break;
		case SPDK_DIF_TYPE2:
			nsdata->dpc.pit2 = 1;
			nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE2;
			break;
		case SPDK_DIF_TYPE3:
			nsdata->dpc.pit3 = 1;
			nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE3;
			break;
		default:
			SPDK_DEBUGLOG(SPDK_LOG_NVMF, "Protection Disabled\n");
			nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_DISABLE;
			break;
		}
	}
	nsdata->noiob = spdk_bdev_get_optimal_io_boundary(bdev);
	nsdata->nmic.can_share = 1;
	if (ns->ptpl_file != NULL) {
		nsdata->nsrescap.rescap.persist = 1;
	}
	nsdata->nsrescap.rescap.write_exclusive = 1;
	nsdata->nsrescap.rescap.exclusive_access = 1;
	nsdata->nsrescap.rescap.write_exclusive_reg_only = 1;
	nsdata->nsrescap.rescap.exclusive_access_reg_only = 1;
	nsdata->nsrescap.rescap.write_exclusive_all_reg = 1;
	nsdata->nsrescap.rescap.exclusive_access_all_reg = 1;
	nsdata->nsrescap.rescap.ignore_existing_key = 1;

	SPDK_STATIC_ASSERT(sizeof(nsdata->nguid) == sizeof(ns->opts.nguid), "size mismatch");
	memcpy(nsdata->nguid, ns->opts.nguid, sizeof(nsdata->nguid));

	SPDK_STATIC_ASSERT(sizeof(nsdata->eui64) == sizeof(ns->opts.eui64), "size mismatch");
	memcpy(&nsdata->eui64, ns->opts.eui64, sizeof(nsdata->eui64));
}

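/*
 * Extract the starting LBA (CDW10/CDW11) and the number of blocks
 * (CDW12 NLB, converted from 0's based) from a read/write-style command.
 */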
static void
nvmf_bdev_ctrlr_get_rw_params(const struct spdk_nvme_cmd *cmd, uint64_t *start_lba,
			      uint64_t *num_blocks)
{
	/* SLBA: CDW10 and CDW11 */
	*start_lba = from_le64(&cmd->cdw10);

	/* NLB: CDW12 bits 15:00, 0's based */
	*num_blocks = (from_le32(&cmd->cdw12) & 0xFFFFu) + 1;
}

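/*
 * Return true if the I/O range fits within the bdev. The second comparison
 * guards against uint64_t overflow of io_start_lba + io_num_blocks.
 */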
static bool
nvmf_bdev_ctrlr_lba_in_range(uint64_t bdev_num_blocks, uint64_t io_start_lba,
			     uint64_t io_num_blocks)
{
	if (io_start_lba + io_num_blocks > bdev_num_blocks ||
	    io_start_lba + io_num_blocks < io_start_lba) {
		return false;
	}

	return true;
}

static void
spdk_nvmf_ctrlr_process_io_cmd_resubmit(void *arg)
{
	struct spdk_nvmf_request *req = arg;

	spdk_nvmf_ctrlr_process_io_cmd(req);
}

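/*
 * Register the request to be retried via cb_fn once the bdev has free
 * resources again; used when a bdev submission fails with -ENOMEM.
 */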
static void
nvmf_bdev_ctrl_queue_io(struct spdk_nvmf_request *req, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn, void *cb_arg)
{
	int rc;

	req->bdev_io_wait.bdev = bdev;
	req->bdev_io_wait.cb_fn = cb_fn;
	req->bdev_io_wait.cb_arg = cb_arg;

	rc = spdk_bdev_queue_io_wait(bdev, ch, &req->bdev_io_wait);
	if (rc != 0) {
		assert(false);
	}
}

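/*
 * Handle an NVMe Read command: validate the LBA range and SGL length, then
 * submit a vectored read to the bdev. Completion is reported asynchronously
 * through nvmf_bdev_ctrlr_complete_cmd.
 */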
int
spdk_nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			      struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_readv_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				    nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

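/*
 * Handle an NVMe Write command; mirrors the Read path above but submits a
 * vectored write to the bdev.
 */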
int
spdk_nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			       struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_writev_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				     nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

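/*
 * Handle an NVMe Write Zeroes command: validate the LBA range and submit a
 * write-zeroes operation to the bdev. No data buffer is involved, so there is
 * no SGL length check.
 */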
int
spdk_nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				      struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ch, start_lba, num_blocks,
					   nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

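/*
 * Handle an NVMe Flush command. Because the NVMe-oF controller reports a
 * volatile write cache, bdevs that cannot flush simply complete with success.
 */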
int
spdk_nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			       struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int rc;

	/* For an NVMe-oF controller, SPDK always sets the volatile write
	 * cache bit to 1, so return success for block devices that do not
	 * support the FLUSH command.
	 */
	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH)) {
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_flush_blocks(desc, ch, 0, spdk_bdev_get_num_blocks(bdev),
				    nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

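/*
 * Context tracking a Dataset Management (deallocate) request as it is split
 * into one bdev unmap per range; count holds the number of outstanding unmaps
 * and range_index records how far submission has progressed.
 */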
struct nvmf_bdev_ctrlr_unmap {
	struct spdk_nvmf_request	*req;
	uint32_t			count;
	struct spdk_bdev_desc		*desc;
	struct spdk_bdev		*bdev;
	struct spdk_io_channel		*ch;
	uint32_t			range_index;
};

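/*
 * Completion callback for one unmap range: record the first error status seen,
 * and complete the NVMe-oF request once all outstanding unmaps have finished.
 */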
static void
nvmf_bdev_ctrlr_unmap_cpl(struct spdk_bdev_io *bdev_io, bool success,
			  void *cb_arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = cb_arg;
	struct spdk_nvmf_request	*req = unmap_ctx->req;
	struct spdk_nvme_cpl		*response = &req->rsp->nvme_cpl;
	int				sc, sct;

	unmap_ctx->count--;

	if (response->status.sct == SPDK_NVME_SCT_GENERIC &&
	    response->status.sc == SPDK_NVME_SC_SUCCESS) {
		spdk_bdev_io_get_nvme_status(bdev_io, &sct, &sc);
		response->status.sc = sc;
		response->status.sct = sct;
	}

	if (unmap_ctx->count == 0) {
		spdk_nvmf_request_complete(req);
		free(unmap_ctx);
	}
	spdk_bdev_free_io(bdev_io);
}

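/*
 * Resubmit callback used after spdk_bdev_unmap_blocks() returns -ENOMEM:
 * continue the unmap using the saved context and range index.
 */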
static int
nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		      struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
		      struct nvmf_bdev_ctrlr_unmap *unmap_ctx);

static void
nvmf_bdev_ctrlr_unmap_resubmit(void *arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = arg;
	struct spdk_nvmf_request *req = unmap_ctx->req;
	struct spdk_bdev_desc *desc = unmap_ctx->desc;
	struct spdk_bdev *bdev = unmap_ctx->bdev;
	struct spdk_io_channel *ch = unmap_ctx->ch;

	nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, unmap_ctx);
}

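/*
 * Submit one bdev unmap per Dataset Management range. On the first call,
 * unmap_ctx is NULL and is allocated here; on resubmission after -ENOMEM, the
 * existing context is passed back in and submission resumes at range_index.
 */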
static int
nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		      struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
		      struct nvmf_bdev_ctrlr_unmap *unmap_ctx)
{
	uint16_t nr, i;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	struct spdk_nvme_dsm_range *dsm_range;
	uint64_t lba;
	uint32_t lba_count;
	int rc;

	nr = ((cmd->cdw10 & 0x000000ff) + 1);
	if (nr * sizeof(struct spdk_nvme_dsm_range) > req->length) {
		SPDK_ERRLOG("Dataset Management number of ranges > SGL length\n");
		response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (unmap_ctx == NULL) {
		unmap_ctx = calloc(1, sizeof(*unmap_ctx));
		if (!unmap_ctx) {
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		unmap_ctx->req = req;
		unmap_ctx->desc = desc;
		unmap_ctx->ch = ch;
		unmap_ctx->bdev = bdev;

		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;
	} else {
		unmap_ctx->count--;	/* dequeued */
	}

	dsm_range = (struct spdk_nvme_dsm_range *)req->data;
	for (i = unmap_ctx->range_index; i < nr; i++) {
		lba = dsm_range[i].starting_lba;
		lba_count = dsm_range[i].length;

		unmap_ctx->count++;

		rc = spdk_bdev_unmap_blocks(desc, ch, lba, lba_count,
					    nvmf_bdev_ctrlr_unmap_cpl, unmap_ctx);
		if (rc) {
			if (rc == -ENOMEM) {
				nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_bdev_ctrlr_unmap_resubmit, unmap_ctx);
				/* Unmap was not yet submitted to bdev */
				/* unmap_ctx->count will be decremented when the request is dequeued */
				return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
			}
			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
			unmap_ctx->count--;
			/* We can't return here - we may have to wait for any other
			 * unmaps already sent to complete */
			break;
		}
		unmap_ctx->range_index++;
	}

	if (unmap_ctx->count == 0) {
		free(unmap_ctx);
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

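/*
 * Handle an NVMe Dataset Management command. Only the deallocate (unmap)
 * attribute is acted on; all other attributes complete successfully as no-ops.
 */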
int
spdk_nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			     struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint32_t attribute;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	attribute = cmd->cdw11 & 0x00000007;
	if (attribute & SPDK_NVME_DSM_ATTR_DEALLOCATE) {
		return nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, NULL);
	}

	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

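/*
 * Pass an arbitrary NVMe I/O command straight through to an NVMe bdev. If the
 * bdev rejects the submission (other than -ENOMEM), the command fails with
 * Invalid Opcode.
 */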
int
spdk_nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				      struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	int rc;

	rc = spdk_bdev_nvme_io_passthru(desc, ch, &req->cmd->nvme_cmd, req->data, req->length,
					nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}