xref: /spdk/lib/nvmf/ctrlr_bdev.c (revision 438b71d17ebaee1e2d53f4548e6cf91ef1623ce2)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"

#include "spdk/bdev.h"
#include "spdk/endian.h"
#include "spdk/thread.h"
#include "spdk/likely.h"
#include "spdk/nvme.h"
#include "spdk/nvmf_spec.h"
#include "spdk/trace.h"
#include "spdk/scsi_spec.h"
#include "spdk/string.h"
#include "spdk/util.h"

#include "spdk_internal/log.h"

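/*
 * Return true only if every namespace in the subsystem is backed by a bdev that
 * supports the given bdev I/O type. Namespaces without a bdev are skipped.
 */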
static bool
spdk_nvmf_subsystem_bdev_io_type_supported(struct spdk_nvmf_subsystem *subsystem,
		enum spdk_bdev_io_type io_type)
{
	struct spdk_nvmf_ns *ns;

	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
		if (ns->bdev == NULL) {
			continue;
		}

		if (!spdk_bdev_io_type_supported(ns->bdev, io_type)) {
			SPDK_DEBUGLOG(SPDK_LOG_NVMF,
				      "Subsystem %s namespace %u (%s) does not support io_type %d\n",
				      spdk_nvmf_subsystem_get_nqn(subsystem),
				      ns->opts.nsid, spdk_bdev_get_name(ns->bdev), (int)io_type);
			return false;
		}
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF, "All devices in Subsystem %s support io_type %d\n",
		      spdk_nvmf_subsystem_get_nqn(subsystem), (int)io_type);
	return true;
}

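/*
 * Controller-level feature checks: DSM (deallocate) and Write Zeroes are
 * advertised only when every namespace bdev supports the corresponding
 * bdev I/O type.
 */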
bool
spdk_nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return spdk_nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_UNMAP);
}

bool
spdk_nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return spdk_nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
}

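/*
 * Generic bdev I/O completion callback: translate the bdev's NVMe status into
 * the NVMe-oF completion, finish the request, and release the bdev_io.
 */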
static void
nvmf_bdev_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success,
			     void *cb_arg)
{
	struct spdk_nvmf_request	*req = cb_arg;
	struct spdk_nvme_cpl		*response = &req->rsp->nvme_cpl;
	int				sc, sct;

	spdk_bdev_io_get_nvme_status(bdev_io, &sct, &sc);
	response->status.sc = sc;
	response->status.sct = sct;

	spdk_nvmf_request_complete(req);
	spdk_bdev_free_io(bdev_io);
}

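/* Fill out an Identify Namespace data structure from the namespace's bdev properties. */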
int
spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata)
{
	struct spdk_bdev *bdev = ns->bdev;
	uint64_t num_blocks;

	num_blocks = spdk_bdev_get_num_blocks(bdev);

	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_block_size(bdev));
	nsdata->noiob = spdk_bdev_get_optimal_io_boundary(bdev);
	nsdata->nmic.can_share = 1;

	SPDK_STATIC_ASSERT(sizeof(nsdata->nguid) == sizeof(ns->opts.nguid), "size mismatch");
	memcpy(nsdata->nguid, ns->opts.nguid, sizeof(nsdata->nguid));

	SPDK_STATIC_ASSERT(sizeof(nsdata->eui64) == sizeof(ns->opts.eui64), "size mismatch");
	memcpy(&nsdata->eui64, ns->opts.eui64, sizeof(nsdata->eui64));

	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static void
nvmf_bdev_ctrlr_get_rw_params(const struct spdk_nvme_cmd *cmd, uint64_t *start_lba,
			      uint64_t *num_blocks)
{
	/* SLBA: CDW10 and CDW11 */
	*start_lba = from_le64(&cmd->cdw10);

	/* NLB: CDW12 bits 15:00, 0's based */
	*num_blocks = (from_le32(&cmd->cdw12) & 0xFFFFu) + 1;
}

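/*
 * Check that the I/O range fits within the bdev, including protection against
 * 64-bit overflow of start LBA + number of blocks.
 */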
static bool
nvmf_bdev_ctrlr_lba_in_range(uint64_t bdev_num_blocks, uint64_t io_start_lba,
			     uint64_t io_num_blocks)
{
	if (io_start_lba + io_num_blocks > bdev_num_blocks ||
	    io_start_lba + io_num_blocks < io_start_lba) {
		return false;
	}

	return true;
}

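/* spdk_bdev_io_wait_cb used to retry an I/O command once bdev_io resources become available again. */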
static void
spdk_nvmf_ctrlr_process_io_cmd_resubmit(void *arg)
{
	struct spdk_nvmf_request *req = arg;

	spdk_nvmf_ctrlr_process_io_cmd(req);
}

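/*
 * Register the request to be resubmitted via the given callback when the bdev
 * can allocate bdev_io again (used on -ENOMEM from the submit paths below).
 */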
static void
nvmf_bdev_ctrl_queue_io(struct spdk_nvmf_request *req, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn, void *cb_arg)
{
	int rc;

	req->bdev_io_wait.bdev = bdev;
	req->bdev_io_wait.cb_fn = cb_fn;
	req->bdev_io_wait.cb_arg = cb_arg;

	rc = spdk_bdev_queue_io_wait(bdev, ch, &req->bdev_io_wait);
	if (rc != 0) {
		assert(false);
	}
}

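/*
 * Handle an NVMe Read command: validate the LBA range and SGL length, then
 * submit spdk_bdev_readv_blocks() and complete asynchronously.
 */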
static int
nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_readv_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				    nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

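/* Handle an NVMe Write command; same checks as the read path, but submits spdk_bdev_writev_blocks(). */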
static int
nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			  struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_get_block_size(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_writev_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
				     nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

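/* Handle an NVMe Write Zeroes command by submitting spdk_bdev_write_zeroes_blocks() for the requested LBA range. */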
static int
nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);

	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_write_zeroes_blocks(desc, ch, start_lba, num_blocks,
					   nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

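/* Handle an NVMe Flush command by flushing the entire bdev. */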
static int
nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			  struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int rc;

	/* For NVMe-oF controllers, SPDK always sets the volatile write cache
	 * bit to 1, so return success for block devices that do not support
	 * the FLUSH I/O type.
	 */
	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH)) {
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_flush_blocks(desc, ch, 0, spdk_bdev_get_num_blocks(bdev),
				    nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}
	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

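/* Context tracking the outstanding unmap operations issued for a single Dataset Management request. */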
struct nvmf_virtual_ctrlr_unmap {
	struct spdk_nvmf_request	*req;
	uint32_t			count;
	struct spdk_bdev_desc		*desc;
	struct spdk_bdev		*bdev;
	struct spdk_io_channel		*ch;
};

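/*
 * Completion callback for one unmap within a DSM request. The first error
 * status is preserved; the request completes once all unmaps have finished.
 */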
static void
nvmf_virtual_ctrlr_dsm_cpl(struct spdk_bdev_io *bdev_io, bool success,
			   void *cb_arg)
{
	struct nvmf_virtual_ctrlr_unmap *unmap_ctx = cb_arg;
	struct spdk_nvmf_request	*req = unmap_ctx->req;
	struct spdk_nvme_cpl		*response = &req->rsp->nvme_cpl;
	int				sc, sct;

	unmap_ctx->count--;

	if (response->status.sct == SPDK_NVME_SCT_GENERIC &&
	    response->status.sc == SPDK_NVME_SC_SUCCESS) {
		spdk_bdev_io_get_nvme_status(bdev_io, &sct, &sc);
		response->status.sc = sc;
		response->status.sct = sct;
	}

	if (unmap_ctx->count == 0) {
		spdk_nvmf_request_complete(req);
		free(unmap_ctx);
	}
	spdk_bdev_free_io(bdev_io);
}

static int
nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
			struct nvmf_virtual_ctrlr_unmap *unmap_ctx);

static void
nvmf_bdev_ctrlr_dsm_cmd_resubmit(void *arg)
{
	struct nvmf_virtual_ctrlr_unmap *unmap_ctx = arg;
	struct spdk_nvmf_request *req = unmap_ctx->req;
	struct spdk_bdev_desc *desc = unmap_ctx->desc;
	struct spdk_bdev *bdev = unmap_ctx->bdev;
	struct spdk_io_channel *ch = unmap_ctx->ch;

	nvmf_bdev_ctrlr_dsm_cmd(bdev, desc, ch, req, unmap_ctx);
}

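/*
 * Handle a Dataset Management command. For the deallocate attribute, issue one
 * bdev unmap per range; unmap_ctx is NULL on the first call and carries the
 * progress when the command is resubmitted after -ENOMEM.
 */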
static int
nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
			struct nvmf_virtual_ctrlr_unmap *unmap_ctx)
{
	uint32_t attribute;
	uint16_t nr, i;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
	int rc;

	nr = ((cmd->cdw10 & 0x000000ff) + 1);
	if (nr * sizeof(struct spdk_nvme_dsm_range) > req->length) {
		SPDK_ERRLOG("Dataset Management number of ranges > SGL length\n");
		response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	attribute = cmd->cdw11 & 0x00000007;
	if (attribute & SPDK_NVME_DSM_ATTR_DEALLOCATE) {
		struct spdk_nvme_dsm_range *dsm_range;
		uint64_t lba;
		uint32_t lba_count;

		if (unmap_ctx == NULL) {
			unmap_ctx = calloc(1, sizeof(*unmap_ctx));
			if (!unmap_ctx) {
				response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
				return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
			}

			unmap_ctx->req = req;
			unmap_ctx->desc = desc;
			unmap_ctx->ch = ch;
		}

		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_SUCCESS;

		dsm_range = (struct spdk_nvme_dsm_range *)req->data;
		for (i = unmap_ctx->count; i < nr; i++) {
			lba = dsm_range[i].starting_lba;
			lba_count = dsm_range[i].length;

			unmap_ctx->count++;

			rc = spdk_bdev_unmap_blocks(desc, ch, lba, lba_count,
						    nvmf_virtual_ctrlr_dsm_cpl, unmap_ctx);
			if (rc) {
				if (rc == -ENOMEM) {
					nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_bdev_ctrlr_dsm_cmd_resubmit, unmap_ctx);
					/* Unmap was not yet submitted to bdev */
					unmap_ctx->count--;
					return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
				}
				response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
				unmap_ctx->count--;
				/* We can't return here - we may have to wait for any other
				 * unmaps already sent to complete */
				break;
			}
		}

		if (unmap_ctx->count == 0) {
			free(unmap_ctx);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
		}

		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
	}

	response->status.sct = SPDK_NVME_SCT_GENERIC;
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

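/* Pass any other NVMe I/O command through to the bdev as an NVMe passthru request. */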
static int
nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	int rc;

	rc = spdk_bdev_nvme_io_passthru(desc, ch, &req->cmd->nvme_cmd, req->data, req->length,
					nvmf_bdev_ctrlr_complete_cmd, req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, spdk_nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

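/*
 * Top-level I/O command dispatcher: validate the controller state and namespace,
 * look up the per-namespace I/O channel, and route the command by opcode.
 */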
int
spdk_nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
{
	uint32_t nsid;
	struct spdk_nvmf_ns *ns;
	struct spdk_bdev *bdev;
	struct spdk_bdev_desc *desc;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_poll_group *group = req->qpair->group;
	struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

	/* pre-set response details for this command */
	response->status.sc = SPDK_NVME_SC_SUCCESS;
	nsid = cmd->nsid;

	if (spdk_unlikely(ctrlr == NULL)) {
		SPDK_ERRLOG("I/O command sent before CONNECT\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	if (spdk_unlikely(ctrlr->vcprop.cc.bits.en != 1)) {
		SPDK_ERRLOG("I/O command sent to disabled controller\n");
		response->status.sct = SPDK_NVME_SCT_GENERIC;
		response->status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	ns = _spdk_nvmf_subsystem_get_ns(ctrlr->subsys, nsid);
	if (ns == NULL || ns->bdev == NULL) {
		SPDK_ERRLOG("Unsuccessful query for nsid %u\n", cmd->nsid);
		response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
		response->status.dnr = 1;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	bdev = ns->bdev;
	desc = ns->desc;
	ch = group->sgroups[ctrlr->subsys->id].channels[nsid - 1];
	switch (cmd->opc) {
	case SPDK_NVME_OPC_READ:
		return nvmf_bdev_ctrlr_read_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_WRITE:
		return nvmf_bdev_ctrlr_write_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_WRITE_ZEROES:
		return nvmf_bdev_ctrlr_write_zeroes_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_FLUSH:
		return nvmf_bdev_ctrlr_flush_cmd(bdev, desc, ch, req);
	case SPDK_NVME_OPC_DATASET_MANAGEMENT:
		return nvmf_bdev_ctrlr_dsm_cmd(bdev, desc, ch, req, NULL);
	default:
		return nvmf_bdev_ctrlr_nvme_passthru_io(bdev, desc, ch, req);
	}
}