xref: /spdk/lib/nvmf/ctrlr_bdev.c (revision 2e10c84c822790902c20cbe1ae21fdaeff91a220)
1488570ebSJim Harris /*   SPDX-License-Identifier: BSD-3-Clause
2a6dbe372Spaul luse  *   Copyright (C) 2017 Intel Corporation. All rights reserved.
39d503727SEvgeniy Kochetov  *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
4d03b31c6SEvgeniy Kochetov  *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5f6e62d2cSBen Walker  */
6f6e62d2cSBen Walker 
7f6e62d2cSBen Walker #include "spdk/stdinc.h"
8f6e62d2cSBen Walker 
97c6ca978SBen Walker #include "nvmf_internal.h"
10f6e62d2cSBen Walker 
11f6e62d2cSBen Walker #include "spdk/bdev.h"
12f6e62d2cSBen Walker #include "spdk/endian.h"
13a83f91c2SBen Walker #include "spdk/thread.h"
145eb12964SDaniel Verkamp #include "spdk/likely.h"
15f6e62d2cSBen Walker #include "spdk/nvme.h"
16ab945f32SBen Walker #include "spdk/nvmf_cmd.h"
17f6e62d2cSBen Walker #include "spdk/nvmf_spec.h"
18f6e62d2cSBen Walker #include "spdk/trace.h"
19f6e62d2cSBen Walker #include "spdk/scsi_spec.h"
20f6e62d2cSBen Walker #include "spdk/string.h"
21f6e62d2cSBen Walker #include "spdk/util.h"
22f6e62d2cSBen Walker 
234e8e97c8STomasz Zawadzki #include "spdk/log.h"
24f6e62d2cSBen Walker 
250caab4e1SDaniel Verkamp static bool
26ce42d9d1SSeth Howell nvmf_subsystem_bdev_io_type_supported(struct spdk_nvmf_subsystem *subsystem,
270caab4e1SDaniel Verkamp 				      enum spdk_bdev_io_type io_type)
28f6e62d2cSBen Walker {
2914451d76SDaniel Verkamp 	struct spdk_nvmf_ns *ns;
30f6e62d2cSBen Walker 
3114451d76SDaniel Verkamp 	for (ns = spdk_nvmf_subsystem_get_first_ns(subsystem); ns != NULL;
3214451d76SDaniel Verkamp 	     ns = spdk_nvmf_subsystem_get_next_ns(subsystem, ns)) {
3314451d76SDaniel Verkamp 		if (ns->bdev == NULL) {
34f6e62d2cSBen Walker 			continue;
35f6e62d2cSBen Walker 		}
36f6e62d2cSBen Walker 
370caab4e1SDaniel Verkamp 		if (!spdk_bdev_io_type_supported(ns->bdev, io_type)) {
382172c432STomasz Zawadzki 			SPDK_DEBUGLOG(nvmf,
390caab4e1SDaniel Verkamp 				      "Subsystem %s namespace %u (%s) does not support io_type %d\n",
4014451d76SDaniel Verkamp 				      spdk_nvmf_subsystem_get_nqn(subsystem),
41250d342bSDaniel Verkamp 				      ns->opts.nsid, spdk_bdev_get_name(ns->bdev), (int)io_type);
42bf6caa75SDaniel Verkamp 			return false;
43f6e62d2cSBen Walker 		}
44f6e62d2cSBen Walker 	}
45f6e62d2cSBen Walker 
462172c432STomasz Zawadzki 	SPDK_DEBUGLOG(nvmf, "All devices in Subsystem %s support io_type %d\n",
470caab4e1SDaniel Verkamp 		      spdk_nvmf_subsystem_get_nqn(subsystem), (int)io_type);
48bf6caa75SDaniel Verkamp 	return true;
49f6e62d2cSBen Walker }
50f6e62d2cSBen Walker 
/* Report whether the controller may advertise Dataset Management (DSM):
 * true only if every namespace's bdev supports UNMAP. */
bool
nvmf_ctrlr_dsm_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_UNMAP);
}
560caab4e1SDaniel Verkamp 
/* Report whether the controller may advertise Write Zeroes:
 * true only if every namespace's bdev supports WRITE_ZEROES. */
bool
nvmf_ctrlr_write_zeroes_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
}
620caab4e1SDaniel Verkamp 
/* Report whether the controller may advertise the Copy command:
 * true only if every namespace's bdev supports COPY. */
bool
nvmf_ctrlr_copy_supported(struct spdk_nvmf_ctrlr *ctrlr)
{
	return nvmf_subsystem_bdev_io_type_supported(ctrlr->subsys, SPDK_BDEV_IO_TYPE_COPY);
}
688305e49bSEvgeniy Kochetov 
69f6e62d2cSBen Walker static void
70f6e62d2cSBen Walker nvmf_bdev_ctrlr_complete_cmd(struct spdk_bdev_io *bdev_io, bool success,
71f6e62d2cSBen Walker 			     void *cb_arg)
72f6e62d2cSBen Walker {
73f6e62d2cSBen Walker 	struct spdk_nvmf_request	*req = cb_arg;
74f6e62d2cSBen Walker 	struct spdk_nvme_cpl		*response = &req->rsp->nvme_cpl;
75a2c540e3SJacek Kalwas 	int				sc = 0, sct = 0;
76a2606d4bSMaciej Szwed 	uint32_t			cdw0 = 0;
77a2c540e3SJacek Kalwas 
78a2c540e3SJacek Kalwas 	if (spdk_unlikely(req->first_fused)) {
7967c9c1c5STomasz Kulasek 		struct spdk_nvmf_request	*first_req = req->first_fused_req;
8067ac0638SJim Harris 		struct spdk_nvme_cpl		*first_response = &first_req->rsp->nvme_cpl;
81a2c540e3SJacek Kalwas 		int				first_sc = 0, first_sct = 0;
8267c9c1c5STomasz Kulasek 
83a2c540e3SJacek Kalwas 		/* get status for both operations */
8467ac0638SJim Harris 		spdk_bdev_io_get_nvme_fused_status(bdev_io, &cdw0, &first_sct, &first_sc, &sct, &sc);
8567ac0638SJim Harris 		first_response->cdw0 = cdw0;
8667ac0638SJim Harris 		first_response->status.sc = first_sc;
8767ac0638SJim Harris 		first_response->status.sct = first_sct;
8867c9c1c5STomasz Kulasek 
8967c9c1c5STomasz Kulasek 		/* first request should be completed */
9067c9c1c5STomasz Kulasek 		spdk_nvmf_request_complete(first_req);
9167c9c1c5STomasz Kulasek 		req->first_fused_req = NULL;
92a2c540e3SJacek Kalwas 		req->first_fused = false;
9367c9c1c5STomasz Kulasek 	} else {
9467ac0638SJim Harris 		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
9567c9c1c5STomasz Kulasek 	}
9667c9c1c5STomasz Kulasek 
9753b92a6cSMichael Haeuptle 	response->cdw0 = cdw0;
9867ac0638SJim Harris 	response->status.sc = sc;
9967ac0638SJim Harris 	response->status.sct = sct;
100f6e62d2cSBen Walker 
101f6e62d2cSBen Walker 	spdk_nvmf_request_complete(req);
102f6e62d2cSBen Walker 	spdk_bdev_free_io(bdev_io);
103f6e62d2cSBen Walker }
104f6e62d2cSBen Walker 
1053fa22056SMichael Haeuptle static void
1063fa22056SMichael Haeuptle nvmf_bdev_ctrlr_complete_admin_cmd(struct spdk_bdev_io *bdev_io, bool success,
1073fa22056SMichael Haeuptle 				   void *cb_arg)
1083fa22056SMichael Haeuptle {
1093fa22056SMichael Haeuptle 	struct spdk_nvmf_request *req = cb_arg;
1103fa22056SMichael Haeuptle 
1113fa22056SMichael Haeuptle 	if (req->cmd_cb_fn) {
1123fa22056SMichael Haeuptle 		req->cmd_cb_fn(req);
1133fa22056SMichael Haeuptle 	}
1143fa22056SMichael Haeuptle 
1153fa22056SMichael Haeuptle 	nvmf_bdev_ctrlr_complete_cmd(bdev_io, success, req);
1163fa22056SMichael Haeuptle }
1173fa22056SMichael Haeuptle 
/* Fill the NVMe Identify Namespace data structure from the properties of the
 * namespace's backing bdev.
 *
 * \param ns Namespace whose bdev/desc supply the geometry.
 * \param nsdata Output Identify Namespace data (presumably zero-initialized
 * by the caller -- TODO confirm; fields not set here are left untouched).
 * \param dif_insert_or_strip When true, the transport inserts/strips DIF
 * metadata, so the namespace is reported without metadata and with the
 * data block size only.
 */
void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	struct spdk_bdev *bdev = ns->bdev;
	struct spdk_bdev_desc *desc = ns->desc;
	uint64_t num_blocks;
	uint32_t phys_blocklen;
	uint32_t max_copy;

	num_blocks = spdk_bdev_get_num_blocks(bdev);

	/* Size, capacity and utilization are all reported as the full bdev size. */
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	/* Exactly one LBA format (index 0) is exposed. */
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->nacwu = spdk_bdev_get_acwu(bdev) - 1; /* nacwu is 0-based */
	if (!dif_insert_or_strip) {
		nsdata->lbaf[0].ms = spdk_bdev_desc_get_md_size(desc);
		nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_desc_get_block_size(desc));
		if (nsdata->lbaf[0].ms != 0) {
			/* Metadata is transferred as part of an extended LBA. */
			nsdata->flbas.extended = 1;
			nsdata->mc.extended = 1;
			nsdata->mc.pointer = 0;
			nsdata->dps.md_start = spdk_bdev_desc_is_dif_head_of_md(desc);

			/* Advertise the protection type matching the bdev's DIF type. */
			switch (spdk_bdev_get_dif_type(bdev)) {
			case SPDK_DIF_TYPE1:
				nsdata->dpc.pit1 = 1;
				nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE1;
				break;
			case SPDK_DIF_TYPE2:
				nsdata->dpc.pit2 = 1;
				nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE2;
				break;
			case SPDK_DIF_TYPE3:
				nsdata->dpc.pit3 = 1;
				nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_TYPE3;
				break;
			default:
				SPDK_DEBUGLOG(nvmf, "Protection Disabled\n");
				nsdata->dps.pit = SPDK_NVME_FMT_NVM_PROTECTION_DISABLE;
				break;
			}
		}
	} else {
		/* Transport handles DIF: hide metadata, report data block size only. */
		nsdata->lbaf[0].ms = 0;
		nsdata->lbaf[0].lbads = spdk_u32log2(spdk_bdev_get_data_block_size(bdev));
	}

	phys_blocklen = spdk_bdev_get_physical_block_size(bdev);
	assert(phys_blocklen > 0);
	/* Linux driver uses min(nawupf, npwg) to set physical_block_size */
	nsdata->nsfeat.optperf = 1;
	nsdata->nsfeat.ns_atomic_write_unit = 1;
	/* npwg/nawupf/npwa/npdg/npda are all 0-based block counts derived from
	 * the physical block length. */
	nsdata->npwg = (phys_blocklen >> nsdata->lbaf[0].lbads) - 1;
	nsdata->nawupf = nsdata->npwg;
	nsdata->npwa = nsdata->npwg;
	nsdata->npdg = nsdata->npwg;
	nsdata->npda = nsdata->npwg;

	/* Report the optimal I/O boundary only when the write unit is a single
	 * block. */
	if (spdk_bdev_get_write_unit_size(bdev) == 1) {
		nsdata->noiob = spdk_bdev_get_optimal_io_boundary(bdev);
	}
	/* Namespaces may be attached to multiple controllers. */
	nsdata->nmic.can_share = 1;
	if (nvmf_ns_is_ptpl_capable(ns)) {
		nsdata->nsrescap.rescap.persist = 1;
	}
	/* All reservation types are advertised. */
	nsdata->nsrescap.rescap.write_exclusive = 1;
	nsdata->nsrescap.rescap.exclusive_access = 1;
	nsdata->nsrescap.rescap.write_exclusive_reg_only = 1;
	nsdata->nsrescap.rescap.exclusive_access_reg_only = 1;
	nsdata->nsrescap.rescap.write_exclusive_all_reg = 1;
	nsdata->nsrescap.rescap.exclusive_access_all_reg = 1;
	nsdata->nsrescap.rescap.ignore_existing_key = 1;

	SPDK_STATIC_ASSERT(sizeof(nsdata->nguid) == sizeof(ns->opts.nguid), "size mismatch");
	memcpy(nsdata->nguid, ns->opts.nguid, sizeof(nsdata->nguid));

	SPDK_STATIC_ASSERT(sizeof(nsdata->eui64) == sizeof(ns->opts.eui64), "size mismatch");
	memcpy(&nsdata->eui64, ns->opts.eui64, sizeof(nsdata->eui64));

	/* For now we support just one source range for copy command */
	nsdata->msrc = 0;

	max_copy = spdk_bdev_get_max_copy(bdev);
	if (max_copy == 0 || max_copy > UINT16_MAX) {
		/* Zero means copy size is unlimited */
		nsdata->mcl = UINT16_MAX;
		nsdata->mssrl = UINT16_MAX;
	} else {
		nsdata->mcl = max_copy;
		nsdata->mssrl = max_copy;
	}
}
215f6e62d2cSBen Walker 
2165e4d957eSShuhei Matsumoto void
2175e4d957eSShuhei Matsumoto nvmf_bdev_ctrlr_identify_iocs_nvm(struct spdk_nvmf_ns *ns,
2185e4d957eSShuhei Matsumoto 				  struct spdk_nvme_nvm_ns_data *nsdata_nvm)
2195e4d957eSShuhei Matsumoto {
220b09de013SShuhei Matsumoto 	struct spdk_bdev_desc *desc = ns->desc;
2215e4d957eSShuhei Matsumoto 	uint8_t _16bpists;
2225e4d957eSShuhei Matsumoto 	uint32_t sts, pif;
2235e4d957eSShuhei Matsumoto 
224b09de013SShuhei Matsumoto 	if (spdk_bdev_desc_get_dif_type(desc) == SPDK_DIF_DISABLE) {
2255e4d957eSShuhei Matsumoto 		return;
2265e4d957eSShuhei Matsumoto 	}
2275e4d957eSShuhei Matsumoto 
228b09de013SShuhei Matsumoto 	pif = spdk_bdev_desc_get_dif_pi_format(desc);
2295e4d957eSShuhei Matsumoto 
2305e4d957eSShuhei Matsumoto 	/*
2315e4d957eSShuhei Matsumoto 	 * 16BPISTS shall be 1 for 32/64b Guard PI.
2325e4d957eSShuhei Matsumoto 	 * STCRS shall be 1 if 16BPISTS is 1.
2335e4d957eSShuhei Matsumoto 	 * 16 is the minimum value of STS for 32b Guard PI.
2345e4d957eSShuhei Matsumoto 	 */
2355e4d957eSShuhei Matsumoto 	switch (pif) {
2365e4d957eSShuhei Matsumoto 	case SPDK_DIF_PI_FORMAT_16:
2375e4d957eSShuhei Matsumoto 		_16bpists = 0;
2385e4d957eSShuhei Matsumoto 		sts = 0;
2395e4d957eSShuhei Matsumoto 		break;
2405e4d957eSShuhei Matsumoto 	case SPDK_DIF_PI_FORMAT_32:
2415e4d957eSShuhei Matsumoto 		_16bpists = 1;
2425e4d957eSShuhei Matsumoto 		sts = 16;
2435e4d957eSShuhei Matsumoto 		break;
2445e4d957eSShuhei Matsumoto 	case SPDK_DIF_PI_FORMAT_64:
2455e4d957eSShuhei Matsumoto 		_16bpists = 1;
2465e4d957eSShuhei Matsumoto 		sts = 0;
2475e4d957eSShuhei Matsumoto 		break;
2485e4d957eSShuhei Matsumoto 	default:
2495e4d957eSShuhei Matsumoto 		SPDK_WARNLOG("PI format %u is not supported\n", pif);
2505e4d957eSShuhei Matsumoto 		return;
2515e4d957eSShuhei Matsumoto 	}
2525e4d957eSShuhei Matsumoto 
2535e4d957eSShuhei Matsumoto 	/* For 16b Guard PI, Storage Tag is not available because we set STS to 0.
2545e4d957eSShuhei Matsumoto 	 * In this case, we do not have to set 16BPISTM to 1. For simplicity,
2555e4d957eSShuhei Matsumoto 	 * set 16BPISTM to 0 and set LBSTM to all zeroes.
2565e4d957eSShuhei Matsumoto 	 *
2575e4d957eSShuhei Matsumoto 	 * We will revisit here when we find any OS uses Storage Tag.
2585e4d957eSShuhei Matsumoto 	 */
2595e4d957eSShuhei Matsumoto 	nsdata_nvm->lbstm = 0;
2605e4d957eSShuhei Matsumoto 	nsdata_nvm->pic._16bpistm = 0;
2615e4d957eSShuhei Matsumoto 
2625e4d957eSShuhei Matsumoto 	nsdata_nvm->pic._16bpists = _16bpists;
2635e4d957eSShuhei Matsumoto 	nsdata_nvm->pic.stcrs = 0;
2645e4d957eSShuhei Matsumoto 	nsdata_nvm->elbaf[0].sts = sts;
2655e4d957eSShuhei Matsumoto 	nsdata_nvm->elbaf[0].pif = pif;
2665e4d957eSShuhei Matsumoto }
2675e4d957eSShuhei Matsumoto 
2685eb12964SDaniel Verkamp static void
2695eb12964SDaniel Verkamp nvmf_bdev_ctrlr_get_rw_params(const struct spdk_nvme_cmd *cmd, uint64_t *start_lba,
2705eb12964SDaniel Verkamp 			      uint64_t *num_blocks)
2715eb12964SDaniel Verkamp {
2725eb12964SDaniel Verkamp 	/* SLBA: CDW10 and CDW11 */
2735eb12964SDaniel Verkamp 	*start_lba = from_le64(&cmd->cdw10);
2745eb12964SDaniel Verkamp 
2755eb12964SDaniel Verkamp 	/* NLB: CDW12 bits 15:00, 0's based */
2765eb12964SDaniel Verkamp 	*num_blocks = (from_le32(&cmd->cdw12) & 0xFFFFu) + 1;
2775eb12964SDaniel Verkamp }
2785eb12964SDaniel Verkamp 
2793dbaa93cSAnkit Kumar static void
2803dbaa93cSAnkit Kumar nvmf_bdev_ctrlr_get_rw_ext_params(const struct spdk_nvme_cmd *cmd,
2813dbaa93cSAnkit Kumar 				  struct spdk_bdev_ext_io_opts *opts)
2823dbaa93cSAnkit Kumar {
2833dbaa93cSAnkit Kumar 	/* Get CDW12 values */
2843dbaa93cSAnkit Kumar 	opts->nvme_cdw12.raw = from_le32(&cmd->cdw12);
2853dbaa93cSAnkit Kumar 
2863dbaa93cSAnkit Kumar 	/* Get CDW13 values */
2873dbaa93cSAnkit Kumar 	opts->nvme_cdw13.raw = from_le32(&cmd->cdw13);
28838b931b2SShuhei Matsumoto 
28938b931b2SShuhei Matsumoto 	/* Bdev layer checks PRACT in CDW12 because it is NVMe specific, but
29038b931b2SShuhei Matsumoto 	 * it does not check DIF check flags in CDW because DIF is not NVMe
29138b931b2SShuhei Matsumoto 	 * specific. Hence, copy DIF check flags from CDW12 to dif_check_flags_exclude_mask.
29238b931b2SShuhei Matsumoto 	 */
29338b931b2SShuhei Matsumoto 	opts->dif_check_flags_exclude_mask = (~opts->nvme_cdw12.raw) & SPDK_NVME_IO_FLAGS_PRCHK_MASK;
2943dbaa93cSAnkit Kumar }
2953dbaa93cSAnkit Kumar 
/* Validate that [io_start_lba, io_start_lba + io_num_blocks) lies within a
 * bdev of bdev_num_blocks blocks, treating unsigned wrap-around of the end
 * LBA as out of range. */
static bool
nvmf_bdev_ctrlr_lba_in_range(uint64_t bdev_num_blocks, uint64_t io_start_lba,
			     uint64_t io_num_blocks)
{
	uint64_t end_lba = io_start_lba + io_num_blocks;

	/* end_lba < io_start_lba means the addition wrapped past UINT64_MAX. */
	return end_lba >= io_start_lba && end_lba <= bdev_num_blocks;
}
3075eb12964SDaniel Verkamp 
308d9b3149eSTomasz Zawadzki static void
309ce42d9d1SSeth Howell nvmf_ctrlr_process_io_cmd_resubmit(void *arg)
310d9b3149eSTomasz Zawadzki {
311d9b3149eSTomasz Zawadzki 	struct spdk_nvmf_request *req = arg;
312d88fa8c1SKonrad Sztyber 	int rc;
313d9b3149eSTomasz Zawadzki 
314d88fa8c1SKonrad Sztyber 	rc = nvmf_ctrlr_process_io_cmd(req);
315d88fa8c1SKonrad Sztyber 	if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
316d88fa8c1SKonrad Sztyber 		spdk_nvmf_request_complete(req);
317d88fa8c1SKonrad Sztyber 	}
318d9b3149eSTomasz Zawadzki }
319d9b3149eSTomasz Zawadzki 
320d9b3149eSTomasz Zawadzki static void
321ce42d9d1SSeth Howell nvmf_ctrlr_process_admin_cmd_resubmit(void *arg)
3223fa22056SMichael Haeuptle {
3233fa22056SMichael Haeuptle 	struct spdk_nvmf_request *req = arg;
324d88fa8c1SKonrad Sztyber 	int rc;
3253fa22056SMichael Haeuptle 
326d88fa8c1SKonrad Sztyber 	rc = nvmf_ctrlr_process_admin_cmd(req);
327d88fa8c1SKonrad Sztyber 	if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
328d88fa8c1SKonrad Sztyber 		spdk_nvmf_request_complete(req);
329d88fa8c1SKonrad Sztyber 	}
3303fa22056SMichael Haeuptle }
3313fa22056SMichael Haeuptle 
/* Queue a request to be resubmitted (via cb_fn) once a bdev_io becomes
 * available after an -ENOMEM submission failure, and account for it in the
 * poll group's pending-bdev-io statistics.
 */
static void
nvmf_bdev_ctrl_queue_io(struct spdk_nvmf_request *req, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn, void *cb_arg)
{
	int rc;

	req->bdev_io_wait.bdev = bdev;
	req->bdev_io_wait.cb_fn = cb_fn;
	req->bdev_io_wait.cb_arg = cb_arg;

	rc = spdk_bdev_queue_io_wait(bdev, ch, &req->bdev_io_wait);
	if (rc != 0) {
		/* Treated as a programming error: aborts in debug builds, and
		 * is silently ignored in release builds. */
		assert(false);
	}
	req->qpair->group->stat.pending_bdev_io++;
}
348d9b3149eSTomasz Zawadzki 
3495818b42fSmatthewb bool
3505818b42fSmatthewb nvmf_bdev_zcopy_enabled(struct spdk_bdev *bdev)
3515818b42fSmatthewb {
3525818b42fSmatthewb 	return spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_ZCOPY);
3535818b42fSmatthewb }
3545818b42fSmatthewb 
/* Execute an NVMe Read command against the namespace's bdev.
 *
 * Validates the LBA range against the bdev size and the transfer size against
 * the request's SGL length, then submits a vectored read. Returns
 * SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS when the I/O was submitted (the
 * completion arrives via nvmf_bdev_ctrlr_complete_cmd) or
 * SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE when the response was filled in
 * synchronously (validation failure or fatal submit error).
 */
int
nvmf_bdev_ctrlr_read_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_bdev_ext_io_opts opts = {
		/* NOTE(review): size covers members up to and including
		 * accel_sequence, yet nvmf_bdev_ctrlr_get_rw_ext_params() below
		 * also fills nvme_cdw12/nvme_cdw13 and dif_check_flags_exclude_mask.
		 * Confirm those members precede accel_sequence in
		 * struct spdk_bdev_ext_io_opts; otherwise the bdev layer will not
		 * see them (the write path uses SPDK_SIZEOF(&opts, nvme_cdw13)). */
		.size = SPDK_SIZEOF(&opts, accel_sequence),
		.memory_domain = req->memory_domain,
		.memory_domain_ctx = req->memory_domain_ctx,
		.accel_sequence = req->accel_sequence,
	};
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_desc_get_block_size(desc);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);
	nvmf_bdev_ctrlr_get_rw_ext_params(cmd, &opts);

	/* Reject reads that start or end beyond the bdev. */
	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* The transport-provided buffer must hold the full transfer. */
	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* Zero-copy requests are handled elsewhere and must not reach here. */
	assert(!spdk_nvmf_request_using_zcopy(req));

	rc = spdk_bdev_readv_blocks_ext(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
					nvmf_bdev_ctrlr_complete_cmd, req, &opts);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			/* No bdev_io available right now; retry when one frees up. */
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
4075eb12964SDaniel Verkamp 
/* Execute an NVMe Write command against the namespace's bdev.
 *
 * Validates the LBA range against the bdev size and the transfer size against
 * the request's SGL length, then submits a vectored write. Returns
 * SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS when the I/O was submitted (the
 * completion arrives via nvmf_bdev_ctrlr_complete_cmd) or
 * SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE when the response was filled in
 * synchronously (validation failure or fatal submit error).
 */
int
nvmf_bdev_ctrlr_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
			  struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
{
	struct spdk_bdev_ext_io_opts opts = {
		/* NOTE(review): size covers members up to and including nvme_cdw13,
		 * but nvmf_bdev_ctrlr_get_rw_ext_params() also fills
		 * dif_check_flags_exclude_mask -- confirm that member does not
		 * follow nvme_cdw13 in struct spdk_bdev_ext_io_opts, otherwise the
		 * bdev layer will not see it. */
		.size = SPDK_SIZEOF(&opts, nvme_cdw13),
		.memory_domain = req->memory_domain,
		.memory_domain_ctx = req->memory_domain_ctx,
		.accel_sequence = req->accel_sequence,
	};
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_desc_get_block_size(desc);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint64_t start_lba;
	uint64_t num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);
	nvmf_bdev_ctrlr_get_rw_ext_params(cmd, &opts);

	/* Reject writes that start or end beyond the bdev. */
	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* The transport-provided buffer must hold the full transfer. */
	if (spdk_unlikely(num_blocks * block_size > req->length)) {
		SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    num_blocks, block_size, req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* Zero-copy requests are handled elsewhere and must not reach here. */
	assert(!spdk_nvmf_request_using_zcopy(req));

	rc = spdk_bdev_writev_blocks_ext(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
					 nvmf_bdev_ctrlr_complete_cmd, req, &opts);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			/* No bdev_io available right now; retry when one frees up. */
			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
460f6e62d2cSBen Walker 
4611b6b6cc4SBen Walker int
4629cb21ad6SSeth Howell nvmf_bdev_ctrlr_compare_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
463941d9e7aSMaciej Szwed 			    struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
464941d9e7aSMaciej Szwed {
465941d9e7aSMaciej Szwed 	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
466b09de013SShuhei Matsumoto 	uint32_t block_size = spdk_bdev_desc_get_block_size(desc);
467941d9e7aSMaciej Szwed 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
468941d9e7aSMaciej Szwed 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
469941d9e7aSMaciej Szwed 	uint64_t start_lba;
470941d9e7aSMaciej Szwed 	uint64_t num_blocks;
471941d9e7aSMaciej Szwed 	int rc;
472941d9e7aSMaciej Szwed 
473941d9e7aSMaciej Szwed 	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);
474941d9e7aSMaciej Szwed 
475941d9e7aSMaciej Szwed 	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
476941d9e7aSMaciej Szwed 		SPDK_ERRLOG("end of media\n");
477941d9e7aSMaciej Szwed 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
478941d9e7aSMaciej Szwed 		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
479941d9e7aSMaciej Szwed 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
480941d9e7aSMaciej Szwed 	}
481941d9e7aSMaciej Szwed 
482941d9e7aSMaciej Szwed 	if (spdk_unlikely(num_blocks * block_size > req->length)) {
483941d9e7aSMaciej Szwed 		SPDK_ERRLOG("Compare NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
484941d9e7aSMaciej Szwed 			    num_blocks, block_size, req->length);
485941d9e7aSMaciej Szwed 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
486941d9e7aSMaciej Szwed 		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
487941d9e7aSMaciej Szwed 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
488941d9e7aSMaciej Szwed 	}
489941d9e7aSMaciej Szwed 
490941d9e7aSMaciej Szwed 	rc = spdk_bdev_comparev_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
491941d9e7aSMaciej Szwed 				       nvmf_bdev_ctrlr_complete_cmd, req);
492941d9e7aSMaciej Szwed 	if (spdk_unlikely(rc)) {
493941d9e7aSMaciej Szwed 		if (rc == -ENOMEM) {
494ce42d9d1SSeth Howell 			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
495941d9e7aSMaciej Szwed 			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
496941d9e7aSMaciej Szwed 		}
497941d9e7aSMaciej Szwed 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
498941d9e7aSMaciej Szwed 		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
499941d9e7aSMaciej Szwed 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
500941d9e7aSMaciej Szwed 	}
501941d9e7aSMaciej Szwed 
502941d9e7aSMaciej Szwed 	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
503941d9e7aSMaciej Szwed }
504941d9e7aSMaciej Szwed 
/* Execute a fused Compare + Write pair as a single bdev compare-and-write
 * operation.
 *
 * Both commands must target the identical LBA range. All error responses are
 * written into the WRITE request's completion; on success the submitted bdev
 * I/O completes both requests via nvmf_bdev_ctrlr_complete_cmd (the write
 * request carries first_fused state -- handled by the completion callback).
 */
int
nvmf_bdev_ctrlr_compare_and_write_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
				      struct spdk_io_channel *ch, struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req)
{
	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
	uint32_t block_size = spdk_bdev_desc_get_block_size(desc);
	struct spdk_nvme_cmd *cmp_cmd = &cmp_req->cmd->nvme_cmd;
	struct spdk_nvme_cmd *write_cmd = &write_req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &write_req->rsp->nvme_cpl;
	uint64_t write_start_lba, cmp_start_lba;
	uint64_t write_num_blocks, cmp_num_blocks;
	int rc;

	nvmf_bdev_ctrlr_get_rw_params(cmp_cmd, &cmp_start_lba, &cmp_num_blocks);
	nvmf_bdev_ctrlr_get_rw_params(write_cmd, &write_start_lba, &write_num_blocks);

	/* Both halves of the fused pair must describe the same LBA range. */
	if (spdk_unlikely(write_start_lba != cmp_start_lba || write_num_blocks != cmp_num_blocks)) {
		SPDK_ERRLOG("Fused command start lba / num blocks mismatch\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* Reject ranges that start or end beyond the bdev. */
	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, write_start_lba,
			  write_num_blocks))) {
		SPDK_ERRLOG("end of media\n");
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	/* The write buffer must hold the full transfer. */
	if (spdk_unlikely(write_num_blocks * block_size > write_req->length)) {
		SPDK_ERRLOG("Write NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
			    write_num_blocks, block_size, write_req->length);
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	rc = spdk_bdev_comparev_and_writev_blocks(desc, ch, cmp_req->iov, cmp_req->iovcnt, write_req->iov,
			write_req->iovcnt, write_start_lba, write_num_blocks, nvmf_bdev_ctrlr_complete_cmd, write_req);
	if (spdk_unlikely(rc)) {
		if (rc == -ENOMEM) {
			/* Queue BOTH requests so each is resubmitted when bdev_ios free up. */
			nvmf_bdev_ctrl_queue_io(cmp_req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, cmp_req);
			nvmf_bdev_ctrl_queue_io(write_req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, write_req);
			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
		}
		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
	}

	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}
55905e7f56cSMaciej Szwed 
56005e7f56cSMaciej Szwed int
5619cb21ad6SSeth Howell nvmf_bdev_ctrlr_write_zeroes_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
5620caab4e1SDaniel Verkamp 				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
5630caab4e1SDaniel Verkamp {
5640caab4e1SDaniel Verkamp 	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
5650caab4e1SDaniel Verkamp 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
5660caab4e1SDaniel Verkamp 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
567c7feb85dSHaoqian He 	uint64_t max_write_zeroes_size = req->qpair->ctrlr->subsys->max_write_zeroes_size_kib;
5680caab4e1SDaniel Verkamp 	uint64_t start_lba;
5690caab4e1SDaniel Verkamp 	uint64_t num_blocks;
570d9b3149eSTomasz Zawadzki 	int rc;
5710caab4e1SDaniel Verkamp 
5720caab4e1SDaniel Verkamp 	nvmf_bdev_ctrlr_get_rw_params(cmd, &start_lba, &num_blocks);
573c7feb85dSHaoqian He 	if (spdk_unlikely(max_write_zeroes_size > 0 &&
574b09de013SShuhei Matsumoto 			  num_blocks > (max_write_zeroes_size << 10) / spdk_bdev_desc_get_block_size(desc))) {
575c7feb85dSHaoqian He 		SPDK_ERRLOG("invalid write zeroes size, should not exceed %" PRIu64 "Kib\n", max_write_zeroes_size);
576c7feb85dSHaoqian He 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
577c7feb85dSHaoqian He 		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
578c7feb85dSHaoqian He 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
579c7feb85dSHaoqian He 	}
5800caab4e1SDaniel Verkamp 
5810caab4e1SDaniel Verkamp 	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
5820caab4e1SDaniel Verkamp 		SPDK_ERRLOG("end of media\n");
5830caab4e1SDaniel Verkamp 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
5840caab4e1SDaniel Verkamp 		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
5850caab4e1SDaniel Verkamp 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
5860caab4e1SDaniel Verkamp 	}
5870caab4e1SDaniel Verkamp 
58840681adcSJacek Kalwas 	if (spdk_unlikely(cmd->cdw12_bits.write_zeroes.deac)) {
58940681adcSJacek Kalwas 		SPDK_ERRLOG("Write Zeroes Deallocate is not supported\n");
59040681adcSJacek Kalwas 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
59140681adcSJacek Kalwas 		rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
59240681adcSJacek Kalwas 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
59340681adcSJacek Kalwas 	}
59440681adcSJacek Kalwas 
595d9b3149eSTomasz Zawadzki 	rc = spdk_bdev_write_zeroes_blocks(desc, ch, start_lba, num_blocks,
596d9b3149eSTomasz Zawadzki 					   nvmf_bdev_ctrlr_complete_cmd, req);
597d9b3149eSTomasz Zawadzki 	if (spdk_unlikely(rc)) {
598d9b3149eSTomasz Zawadzki 		if (rc == -ENOMEM) {
599ce42d9d1SSeth Howell 			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
600d9b3149eSTomasz Zawadzki 			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
601d9b3149eSTomasz Zawadzki 		}
6020caab4e1SDaniel Verkamp 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
6030caab4e1SDaniel Verkamp 		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
6040caab4e1SDaniel Verkamp 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
6050caab4e1SDaniel Verkamp 	}
6060caab4e1SDaniel Verkamp 
6070caab4e1SDaniel Verkamp 	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
6080caab4e1SDaniel Verkamp }
6090caab4e1SDaniel Verkamp 
6101b6b6cc4SBen Walker int
6119cb21ad6SSeth Howell nvmf_bdev_ctrlr_flush_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
612f6e62d2cSBen Walker 			  struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
613f6e62d2cSBen Walker {
614f6e62d2cSBen Walker 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
615d9b3149eSTomasz Zawadzki 	int rc;
616f6e62d2cSBen Walker 
61714a18ccaSChangpeng Liu 	/* As for NVMeoF controller, SPDK always set volatile write
61814a18ccaSChangpeng Liu 	 * cache bit to 1, return success for those block devices
61914a18ccaSChangpeng Liu 	 * which can't support FLUSH command.
62014a18ccaSChangpeng Liu 	 */
62114a18ccaSChangpeng Liu 	if (!spdk_bdev_io_type_supported(bdev, SPDK_BDEV_IO_TYPE_FLUSH)) {
62214a18ccaSChangpeng Liu 		response->status.sct = SPDK_NVME_SCT_GENERIC;
62314a18ccaSChangpeng Liu 		response->status.sc = SPDK_NVME_SC_SUCCESS;
62414a18ccaSChangpeng Liu 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
62514a18ccaSChangpeng Liu 	}
62614a18ccaSChangpeng Liu 
627d9b3149eSTomasz Zawadzki 	rc = spdk_bdev_flush_blocks(desc, ch, 0, spdk_bdev_get_num_blocks(bdev),
628d9b3149eSTomasz Zawadzki 				    nvmf_bdev_ctrlr_complete_cmd, req);
629d9b3149eSTomasz Zawadzki 	if (spdk_unlikely(rc)) {
630d9b3149eSTomasz Zawadzki 		if (rc == -ENOMEM) {
631ce42d9d1SSeth Howell 			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
632d9b3149eSTomasz Zawadzki 			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
633d9b3149eSTomasz Zawadzki 		}
634f6e62d2cSBen Walker 		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
635f6e62d2cSBen Walker 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
636f6e62d2cSBen Walker 	}
637f6e62d2cSBen Walker 	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
638f6e62d2cSBen Walker }
639f6e62d2cSBen Walker 
/* Context tracking one Dataset Management deallocate (unmap) command that may
 * fan out into multiple bdev unmap I/Os and be requeued on -ENOMEM.
 */
struct nvmf_bdev_ctrlr_unmap {
	struct spdk_nvmf_request	*req;		/* originating DSM request */
	uint32_t			count;		/* outstanding bdev unmap I/Os (also +1 while queued for resubmit) */
	struct spdk_bdev_desc		*desc;		/* descriptor used for submission */
	struct spdk_bdev		*bdev;		/* target bdev */
	struct spdk_io_channel		*ch;		/* channel used for submission */
	uint32_t			range_index;	/* next DSM range to submit (resume point after resubmit) */
};
64860c38d40SBen Walker 
/* Completion callback for one bdev unmap issued by nvmf_bdev_ctrlr_unmap().
 * Records the status of the first failing unmap and completes the NVMf
 * request when the last outstanding unmap has finished.
 */
static void
nvmf_bdev_ctrlr_unmap_cpl(struct spdk_bdev_io *bdev_io, bool success,
			  void *cb_arg)
{
	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = cb_arg;
	struct spdk_nvmf_request	*req = unmap_ctx->req;
	struct spdk_nvme_cpl		*response = &req->rsp->nvme_cpl;
	int				sc, sct;
	uint32_t			cdw0;

	unmap_ctx->count--;

	/* Only overwrite the completion while it still reads SUCCESS, so the
	 * first error reported by any of the unmaps is preserved. */
	if (response->status.sct == SPDK_NVME_SCT_GENERIC &&
	    response->status.sc == SPDK_NVME_SC_SUCCESS) {
		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
		response->cdw0 = cdw0;
		response->status.sc = sc;
		response->status.sct = sct;
	}

	/* Last outstanding unmap: complete the request and drop the context.
	 * The bdev_io is independent of unmap_ctx, so freeing it afterwards
	 * is safe. */
	if (unmap_ctx->count == 0) {
		spdk_nvmf_request_complete(req);
		free(unmap_ctx);
	}
	spdk_bdev_free_io(bdev_io);
}
67560c38d40SBen Walker 
6768dd1cd21SBen Walker static int nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
677d9b3149eSTomasz Zawadzki 				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
678f52f6aeeSBen Walker 				 struct nvmf_bdev_ctrlr_unmap *unmap_ctx);
679d9b3149eSTomasz Zawadzki static void
6805f0df585SBen Walker nvmf_bdev_ctrlr_unmap_resubmit(void *arg)
681d9b3149eSTomasz Zawadzki {
682f52f6aeeSBen Walker 	struct nvmf_bdev_ctrlr_unmap *unmap_ctx = arg;
683d9b3149eSTomasz Zawadzki 	struct spdk_nvmf_request *req = unmap_ctx->req;
684d9b3149eSTomasz Zawadzki 	struct spdk_bdev_desc *desc = unmap_ctx->desc;
685d9b3149eSTomasz Zawadzki 	struct spdk_bdev *bdev = unmap_ctx->bdev;
686d9b3149eSTomasz Zawadzki 	struct spdk_io_channel *ch = unmap_ctx->ch;
687d9b3149eSTomasz Zawadzki 
6885f0df585SBen Walker 	nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, unmap_ctx);
689d9b3149eSTomasz Zawadzki }
690d9b3149eSTomasz Zawadzki 
691d9b3149eSTomasz Zawadzki static int
6925f0df585SBen Walker nvmf_bdev_ctrlr_unmap(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
693d9b3149eSTomasz Zawadzki 		      struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
694f52f6aeeSBen Walker 		      struct nvmf_bdev_ctrlr_unmap *unmap_ctx)
695f6e62d2cSBen Walker {
69660c38d40SBen Walker 	uint16_t nr, i;
697f6e62d2cSBen Walker 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
698f6e62d2cSBen Walker 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
699c7feb85dSHaoqian He 	uint64_t max_discard_size = req->qpair->ctrlr->subsys->max_discard_size_kib;
700b09de013SShuhei Matsumoto 	uint32_t block_size = spdk_bdev_desc_get_block_size(desc);
701ecc80dfcSJohn Levon 	struct spdk_iov_xfer ix;
7025f0df585SBen Walker 	uint64_t lba;
7035f0df585SBen Walker 	uint32_t lba_count;
704d9b3149eSTomasz Zawadzki 	int rc;
705f6e62d2cSBen Walker 
7061fea1fccSChangpeng Liu 	nr = cmd->cdw10_bits.dsm.nr + 1;
707f6e62d2cSBen Walker 	if (nr * sizeof(struct spdk_nvme_dsm_range) > req->length) {
708f6e62d2cSBen Walker 		SPDK_ERRLOG("Dataset Management number of ranges > SGL length\n");
709f6e62d2cSBen Walker 		response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
710f6e62d2cSBen Walker 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
711f6e62d2cSBen Walker 	}
712f6e62d2cSBen Walker 
713d9b3149eSTomasz Zawadzki 	if (unmap_ctx == NULL) {
71460c38d40SBen Walker 		unmap_ctx = calloc(1, sizeof(*unmap_ctx));
71560c38d40SBen Walker 		if (!unmap_ctx) {
716f6e62d2cSBen Walker 			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
717f6e62d2cSBen Walker 			return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
718f6e62d2cSBen Walker 		}
719f6e62d2cSBen Walker 
72060c38d40SBen Walker 		unmap_ctx->req = req;
721d9b3149eSTomasz Zawadzki 		unmap_ctx->desc = desc;
722d9b3149eSTomasz Zawadzki 		unmap_ctx->ch = ch;
7232b0ae30bSYair Elharrar 		unmap_ctx->bdev = bdev;
72460c38d40SBen Walker 
72560c38d40SBen Walker 		response->status.sct = SPDK_NVME_SCT_GENERIC;
72660c38d40SBen Walker 		response->status.sc = SPDK_NVME_SC_SUCCESS;
7272b0ae30bSYair Elharrar 	} else {
7282b0ae30bSYair Elharrar 		unmap_ctx->count--;	/* dequeued */
7292b0ae30bSYair Elharrar 	}
73060c38d40SBen Walker 
731ecc80dfcSJohn Levon 	spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
732ecc80dfcSJohn Levon 
7332b0ae30bSYair Elharrar 	for (i = unmap_ctx->range_index; i < nr; i++) {
734ecc80dfcSJohn Levon 		struct spdk_nvme_dsm_range dsm_range = { 0 };
735ecc80dfcSJohn Levon 
736ecc80dfcSJohn Levon 		spdk_iov_xfer_to_buf(&ix, &dsm_range, sizeof(dsm_range));
737ecc80dfcSJohn Levon 
738ecc80dfcSJohn Levon 		lba = dsm_range.starting_lba;
739ecc80dfcSJohn Levon 		lba_count = dsm_range.length;
740c7feb85dSHaoqian He 		if (max_discard_size > 0 && lba_count > (max_discard_size << 10) / block_size) {
741081f43f2SVasuki Manikarnike 			SPDK_ERRLOG("invalid unmap size %" PRIu32 " blocks, should not exceed %" PRIu64 " blocks\n",
742081f43f2SVasuki Manikarnike 				    lba_count, max_discard_size << 1);
743c7feb85dSHaoqian He 			response->status.sct = SPDK_NVME_SCT_GENERIC;
744c7feb85dSHaoqian He 			response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
745081f43f2SVasuki Manikarnike 			break;
746c7feb85dSHaoqian He 		}
74760c38d40SBen Walker 
74860c38d40SBen Walker 		unmap_ctx->count++;
74960c38d40SBen Walker 
750d9b3149eSTomasz Zawadzki 		rc = spdk_bdev_unmap_blocks(desc, ch, lba, lba_count,
7515f0df585SBen Walker 					    nvmf_bdev_ctrlr_unmap_cpl, unmap_ctx);
752d9b3149eSTomasz Zawadzki 		if (rc) {
753d9b3149eSTomasz Zawadzki 			if (rc == -ENOMEM) {
7545f0df585SBen Walker 				nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_bdev_ctrlr_unmap_resubmit, unmap_ctx);
755d9b3149eSTomasz Zawadzki 				/* Unmap was not yet submitted to bdev */
7562b0ae30bSYair Elharrar 				/* unmap_ctx->count will be decremented when the request is dequeued */
757d9b3149eSTomasz Zawadzki 				return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
758d9b3149eSTomasz Zawadzki 			}
759f6e62d2cSBen Walker 			response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
76060c38d40SBen Walker 			unmap_ctx->count--;
76160c38d40SBen Walker 			/* We can't return here - we may have to wait for any other
76260c38d40SBen Walker 				* unmaps already sent to complete */
76360c38d40SBen Walker 			break;
764f6e62d2cSBen Walker 		}
7652b0ae30bSYair Elharrar 		unmap_ctx->range_index++;
766f6e62d2cSBen Walker 	}
767f6e62d2cSBen Walker 
76860c38d40SBen Walker 	if (unmap_ctx->count == 0) {
76960c38d40SBen Walker 		free(unmap_ctx);
77060c38d40SBen Walker 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
77160c38d40SBen Walker 	}
77260c38d40SBen Walker 
773f6e62d2cSBen Walker 	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
774f6e62d2cSBen Walker }
77560c38d40SBen Walker 
7761b6b6cc4SBen Walker int
7779cb21ad6SSeth Howell nvmf_bdev_ctrlr_dsm_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
7785f0df585SBen Walker 			struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
7795f0df585SBen Walker {
7805f0df585SBen Walker 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
7815f0df585SBen Walker 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
7825f0df585SBen Walker 
7830c9057f0SChangpeng Liu 	if (cmd->cdw11_bits.dsm.ad) {
7845f0df585SBen Walker 		return nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, NULL);
7855f0df585SBen Walker 	}
7865f0df585SBen Walker 
787565932d2SDaniel Verkamp 	response->status.sct = SPDK_NVME_SCT_GENERIC;
788565932d2SDaniel Verkamp 	response->status.sc = SPDK_NVME_SC_SUCCESS;
789f6e62d2cSBen Walker 	return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
790f6e62d2cSBen Walker }
791f6e62d2cSBen Walker 
7921b6b6cc4SBen Walker int
7938305e49bSEvgeniy Kochetov nvmf_bdev_ctrlr_copy_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
7948305e49bSEvgeniy Kochetov 			 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
7958305e49bSEvgeniy Kochetov {
7968305e49bSEvgeniy Kochetov 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
7978305e49bSEvgeniy Kochetov 	struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
7988305e49bSEvgeniy Kochetov 	uint64_t sdlba = ((uint64_t)cmd->cdw11 << 32) + cmd->cdw10;
799ecc80dfcSJohn Levon 	struct spdk_nvme_scc_source_range range = { 0 };
800ecc80dfcSJohn Levon 	struct spdk_iov_xfer ix;
8018305e49bSEvgeniy Kochetov 	int rc;
8028305e49bSEvgeniy Kochetov 
8038305e49bSEvgeniy Kochetov 	SPDK_DEBUGLOG(nvmf, "Copy command: SDLBA %lu, NR %u, desc format %u, PRINFOR %u, "
8048305e49bSEvgeniy Kochetov 		      "DTYPE %u, STCW %u, PRINFOW %u, FUA %u, LR %u\n",
8058305e49bSEvgeniy Kochetov 		      sdlba,
8068305e49bSEvgeniy Kochetov 		      cmd->cdw12_bits.copy.nr,
8078305e49bSEvgeniy Kochetov 		      cmd->cdw12_bits.copy.df,
8088305e49bSEvgeniy Kochetov 		      cmd->cdw12_bits.copy.prinfor,
8098305e49bSEvgeniy Kochetov 		      cmd->cdw12_bits.copy.dtype,
8108305e49bSEvgeniy Kochetov 		      cmd->cdw12_bits.copy.stcw,
8118305e49bSEvgeniy Kochetov 		      cmd->cdw12_bits.copy.prinfow,
8128305e49bSEvgeniy Kochetov 		      cmd->cdw12_bits.copy.fua,
8138305e49bSEvgeniy Kochetov 		      cmd->cdw12_bits.copy.lr);
814a64acd10SKonrad Sztyber 
815a64acd10SKonrad Sztyber 	if (spdk_unlikely(req->length != (cmd->cdw12_bits.copy.nr + 1) *
816a64acd10SKonrad Sztyber 			  sizeof(struct spdk_nvme_scc_source_range))) {
817a64acd10SKonrad Sztyber 		response->status.sct = SPDK_NVME_SCT_GENERIC;
818a64acd10SKonrad Sztyber 		response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
819a64acd10SKonrad Sztyber 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
820a64acd10SKonrad Sztyber 	}
8218305e49bSEvgeniy Kochetov 
822ecc80dfcSJohn Levon 	/*
823ecc80dfcSJohn Levon 	 * We support only one source range, and rely on this with the xfer
824ecc80dfcSJohn Levon 	 * below.
825ecc80dfcSJohn Levon 	 */
8268305e49bSEvgeniy Kochetov 	if (cmd->cdw12_bits.copy.nr > 0) {
8278305e49bSEvgeniy Kochetov 		response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
8288305e49bSEvgeniy Kochetov 		response->status.sc = SPDK_NVME_SC_CMD_SIZE_LIMIT_SIZE_EXCEEDED;
8298305e49bSEvgeniy Kochetov 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
8308305e49bSEvgeniy Kochetov 	}
8318305e49bSEvgeniy Kochetov 
8328305e49bSEvgeniy Kochetov 	if (cmd->cdw12_bits.copy.df != 0) {
8338305e49bSEvgeniy Kochetov 		response->status.sct = SPDK_NVME_SCT_GENERIC;
8348305e49bSEvgeniy Kochetov 		response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
8358305e49bSEvgeniy Kochetov 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
8368305e49bSEvgeniy Kochetov 	}
8378305e49bSEvgeniy Kochetov 
838ecc80dfcSJohn Levon 	spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
839ecc80dfcSJohn Levon 	spdk_iov_xfer_to_buf(&ix, &range, sizeof(range));
840ecc80dfcSJohn Levon 
841ecc80dfcSJohn Levon 	rc = spdk_bdev_copy_blocks(desc, ch, sdlba, range.slba, range.nlb + 1,
8428305e49bSEvgeniy Kochetov 				   nvmf_bdev_ctrlr_complete_cmd, req);
8438305e49bSEvgeniy Kochetov 	if (spdk_unlikely(rc)) {
8448305e49bSEvgeniy Kochetov 		if (rc == -ENOMEM) {
8458305e49bSEvgeniy Kochetov 			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
8468305e49bSEvgeniy Kochetov 			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
8478305e49bSEvgeniy Kochetov 		}
8488305e49bSEvgeniy Kochetov 
8498305e49bSEvgeniy Kochetov 		response->status.sct = SPDK_NVME_SCT_GENERIC;
8508305e49bSEvgeniy Kochetov 		response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
8518305e49bSEvgeniy Kochetov 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
8528305e49bSEvgeniy Kochetov 	}
8538305e49bSEvgeniy Kochetov 
8548305e49bSEvgeniy Kochetov 	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
8558305e49bSEvgeniy Kochetov }
8568305e49bSEvgeniy Kochetov 
8578305e49bSEvgeniy Kochetov int
8589cb21ad6SSeth Howell nvmf_bdev_ctrlr_nvme_passthru_io(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
859f6e62d2cSBen Walker 				 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
860f6e62d2cSBen Walker {
861d9b3149eSTomasz Zawadzki 	int rc;
862d9b3149eSTomasz Zawadzki 
86304a428f5SKarl Bonde Torp 	rc = spdk_bdev_nvme_iov_passthru_md(desc, ch, &req->cmd->nvme_cmd, req->iov, req->iovcnt,
86404a428f5SKarl Bonde Torp 					    req->length, NULL, 0, nvmf_bdev_ctrlr_complete_cmd, req);
8656b206e31SJohn Levon 
866d9b3149eSTomasz Zawadzki 	if (spdk_unlikely(rc)) {
867d9b3149eSTomasz Zawadzki 		if (rc == -ENOMEM) {
868ce42d9d1SSeth Howell 			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
869d9b3149eSTomasz Zawadzki 			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
870d9b3149eSTomasz Zawadzki 		}
871f6e62d2cSBen Walker 		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
872f6e62d2cSBen Walker 		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
8731fd2af01SEvgeniy Kochetov 		req->rsp->nvme_cpl.status.dnr = 1;
874f6e62d2cSBen Walker 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
875f6e62d2cSBen Walker 	}
876f6e62d2cSBen Walker 
877f6e62d2cSBen Walker 	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
878f6e62d2cSBen Walker }
8799b04e291SShuhei Matsumoto 
8803fa22056SMichael Haeuptle int
8813fa22056SMichael Haeuptle spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
8823fa22056SMichael Haeuptle 		struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
8833fa22056SMichael Haeuptle 		spdk_nvmf_nvme_passthru_cmd_cb cb_fn)
8843fa22056SMichael Haeuptle {
8853fa22056SMichael Haeuptle 	int rc;
8863fa22056SMichael Haeuptle 
8871a53c8a5SZiv Hirsch 	if (spdk_unlikely(req->iovcnt > 1)) {
8886b206e31SJohn Levon 		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
8896b206e31SJohn Levon 		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
8906b206e31SJohn Levon 		req->rsp->nvme_cpl.status.dnr = 1;
8916b206e31SJohn Levon 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
8926b206e31SJohn Levon 	}
8936b206e31SJohn Levon 
8943fa22056SMichael Haeuptle 	req->cmd_cb_fn = cb_fn;
8953fa22056SMichael Haeuptle 
8966b206e31SJohn Levon 	rc = spdk_bdev_nvme_admin_passthru(desc, ch, &req->cmd->nvme_cmd, req->iov[0].iov_base, req->length,
8973fa22056SMichael Haeuptle 					   nvmf_bdev_ctrlr_complete_admin_cmd, req);
8983fa22056SMichael Haeuptle 	if (spdk_unlikely(rc)) {
8993fa22056SMichael Haeuptle 		if (rc == -ENOMEM) {
900ce42d9d1SSeth Howell 			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_admin_cmd_resubmit, req);
9013fa22056SMichael Haeuptle 			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
9023fa22056SMichael Haeuptle 		}
9033fa22056SMichael Haeuptle 		req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
904d03b31c6SEvgeniy Kochetov 		if (rc == -ENOTSUP) {
905d03b31c6SEvgeniy Kochetov 			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
906d03b31c6SEvgeniy Kochetov 		} else {
9073fa22056SMichael Haeuptle 			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
908d03b31c6SEvgeniy Kochetov 		}
909d03b31c6SEvgeniy Kochetov 
9101fd2af01SEvgeniy Kochetov 		req->rsp->nvme_cpl.status.dnr = 1;
9113fa22056SMichael Haeuptle 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
9123fa22056SMichael Haeuptle 	}
9133fa22056SMichael Haeuptle 
9143fa22056SMichael Haeuptle 	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
9153fa22056SMichael Haeuptle }
9163fa22056SMichael Haeuptle 
91791b80fe2SShuhei Matsumoto static void
91891b80fe2SShuhei Matsumoto nvmf_bdev_ctrlr_complete_abort_cmd(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
91991b80fe2SShuhei Matsumoto {
92091b80fe2SShuhei Matsumoto 	struct spdk_nvmf_request *req = cb_arg;
92191b80fe2SShuhei Matsumoto 
92291b80fe2SShuhei Matsumoto 	if (success) {
92391b80fe2SShuhei Matsumoto 		req->rsp->nvme_cpl.cdw0 &= ~1U;
92491b80fe2SShuhei Matsumoto 	}
92591b80fe2SShuhei Matsumoto 
92691b80fe2SShuhei Matsumoto 	spdk_nvmf_request_complete(req);
92791b80fe2SShuhei Matsumoto 	spdk_bdev_free_io(bdev_io);
92891b80fe2SShuhei Matsumoto }
92991b80fe2SShuhei Matsumoto 
93091b80fe2SShuhei Matsumoto int
93162649a7dSMichael Haeuptle spdk_nvmf_bdev_ctrlr_abort_cmd(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
93291b80fe2SShuhei Matsumoto 			       struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
93391b80fe2SShuhei Matsumoto 			       struct spdk_nvmf_request *req_to_abort)
93491b80fe2SShuhei Matsumoto {
93591b80fe2SShuhei Matsumoto 	int rc;
93691b80fe2SShuhei Matsumoto 
93791b80fe2SShuhei Matsumoto 	assert((req->rsp->nvme_cpl.cdw0 & 1U) != 0);
93891b80fe2SShuhei Matsumoto 
93991b80fe2SShuhei Matsumoto 	rc = spdk_bdev_abort(desc, ch, req_to_abort, nvmf_bdev_ctrlr_complete_abort_cmd, req);
94091b80fe2SShuhei Matsumoto 	if (spdk_likely(rc == 0)) {
94191b80fe2SShuhei Matsumoto 		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
94291b80fe2SShuhei Matsumoto 	} else if (rc == -ENOMEM) {
94391b80fe2SShuhei Matsumoto 		nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_admin_cmd_resubmit, req);
94491b80fe2SShuhei Matsumoto 		return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
94591b80fe2SShuhei Matsumoto 	} else {
94691b80fe2SShuhei Matsumoto 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
94791b80fe2SShuhei Matsumoto 	}
94891b80fe2SShuhei Matsumoto }
94991b80fe2SShuhei Matsumoto 
9509b04e291SShuhei Matsumoto bool
951b09de013SShuhei Matsumoto nvmf_bdev_ctrlr_get_dif_ctx(struct spdk_bdev_desc *desc, struct spdk_nvme_cmd *cmd,
9529b04e291SShuhei Matsumoto 			    struct spdk_dif_ctx *dif_ctx)
9539b04e291SShuhei Matsumoto {
9549b04e291SShuhei Matsumoto 	uint32_t init_ref_tag, dif_check_flags = 0;
9559b04e291SShuhei Matsumoto 	int rc;
956a711d629SSlawomir Ptak 	struct spdk_dif_ctx_init_ext_opts dif_opts;
9579b04e291SShuhei Matsumoto 
958b09de013SShuhei Matsumoto 	if (spdk_bdev_desc_get_md_size(desc) == 0) {
9599b04e291SShuhei Matsumoto 		return false;
9609b04e291SShuhei Matsumoto 	}
9619b04e291SShuhei Matsumoto 
9629b04e291SShuhei Matsumoto 	/* Initial Reference Tag is the lower 32 bits of the start LBA. */
9639b04e291SShuhei Matsumoto 	init_ref_tag = (uint32_t)from_le64(&cmd->cdw10);
9649b04e291SShuhei Matsumoto 
965b09de013SShuhei Matsumoto 	if (spdk_bdev_desc_is_dif_check_enabled(desc, SPDK_DIF_CHECK_TYPE_REFTAG)) {
9669b04e291SShuhei Matsumoto 		dif_check_flags |= SPDK_DIF_FLAGS_REFTAG_CHECK;
9679b04e291SShuhei Matsumoto 	}
9689b04e291SShuhei Matsumoto 
969b09de013SShuhei Matsumoto 	if (spdk_bdev_desc_is_dif_check_enabled(desc, SPDK_DIF_CHECK_TYPE_GUARD)) {
9709b04e291SShuhei Matsumoto 		dif_check_flags |= SPDK_DIF_FLAGS_GUARD_CHECK;
9719b04e291SShuhei Matsumoto 	}
9729b04e291SShuhei Matsumoto 
9735681a8a6SKonrad Sztyber 	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
974a711d629SSlawomir Ptak 	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
9759b04e291SShuhei Matsumoto 	rc = spdk_dif_ctx_init(dif_ctx,
976b09de013SShuhei Matsumoto 			       spdk_bdev_desc_get_block_size(desc),
977b09de013SShuhei Matsumoto 			       spdk_bdev_desc_get_md_size(desc),
978b09de013SShuhei Matsumoto 			       spdk_bdev_desc_is_md_interleaved(desc),
979b09de013SShuhei Matsumoto 			       spdk_bdev_desc_is_dif_head_of_md(desc),
980b09de013SShuhei Matsumoto 			       spdk_bdev_desc_get_dif_type(desc),
9819b04e291SShuhei Matsumoto 			       dif_check_flags,
982a711d629SSlawomir Ptak 			       init_ref_tag, 0, 0, 0, 0, &dif_opts);
9839b04e291SShuhei Matsumoto 
9849b04e291SShuhei Matsumoto 	return (rc == 0) ? true : false;
9859b04e291SShuhei Matsumoto }
9865818b42fSmatthewb 
/* Completion callback for spdk_bdev_zcopy_start().  On success, adopt the
 * bdev-provided iovec as the request's data buffers and keep the bdev_io
 * alive until the matching END ZCOPY; on failure, propagate the NVMe status
 * and release the bdev_io immediately.
 */
static void
nvmf_bdev_ctrlr_zcopy_start_complete(struct spdk_bdev_io *bdev_io, bool success,
				     void *cb_arg)
{
	struct spdk_nvmf_request	*req = cb_arg;
	struct iovec *iov;
	int iovcnt = 0;

	if (spdk_unlikely(!success)) {
		int                     sc = 0, sct = 0;
		uint32_t                cdw0 = 0;
		struct spdk_nvme_cpl    *response = &req->rsp->nvme_cpl;
		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);

		response->cdw0 = cdw0;
		response->status.sc = sc;
		response->status.sct = sct;

		/* No END ZCOPY will follow; the bdev_io can be freed now. */
		spdk_bdev_free_io(bdev_io);
		spdk_nvmf_request_complete(req);
		return;
	}

	spdk_bdev_io_get_iovec(bdev_io, &iov, &iovcnt);

	assert(iovcnt <= NVMF_REQ_MAX_BUFFERS);
	assert(iovcnt > 0);

	req->iovcnt = iovcnt;

	/* zcopy_start was given req->iov, so the bdev must hand back the same array. */
	assert(req->iov == iov);

	req->zcopy_bdev_io = bdev_io; /* Preserve the bdev_io for the end zcopy */

	spdk_nvmf_request_complete(req);
	/* Don't free the bdev_io here as it is needed for the END ZCOPY */
}
10245818b42fSmatthewb 
10255818b42fSmatthewb int
10260e09df57SKonrad Sztyber nvmf_bdev_ctrlr_zcopy_start(struct spdk_bdev *bdev,
10275818b42fSmatthewb 			    struct spdk_bdev_desc *desc,
10285818b42fSmatthewb 			    struct spdk_io_channel *ch,
10295818b42fSmatthewb 			    struct spdk_nvmf_request *req)
10305818b42fSmatthewb {
1031686b9984SKonrad Sztyber 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
10325818b42fSmatthewb 	uint64_t bdev_num_blocks = spdk_bdev_get_num_blocks(bdev);
1033b09de013SShuhei Matsumoto 	uint32_t block_size = spdk_bdev_desc_get_block_size(desc);
10345818b42fSmatthewb 	uint64_t start_lba;
10355818b42fSmatthewb 	uint64_t num_blocks;
1036686b9984SKonrad Sztyber 	int rc;
10375818b42fSmatthewb 
10385818b42fSmatthewb 	nvmf_bdev_ctrlr_get_rw_params(&req->cmd->nvme_cmd, &start_lba, &num_blocks);
10395818b42fSmatthewb 
10405818b42fSmatthewb 	if (spdk_unlikely(!nvmf_bdev_ctrlr_lba_in_range(bdev_num_blocks, start_lba, num_blocks))) {
10415818b42fSmatthewb 		SPDK_ERRLOG("end of media\n");
1042686b9984SKonrad Sztyber 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
1043686b9984SKonrad Sztyber 		rsp->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
1044686b9984SKonrad Sztyber 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
10455818b42fSmatthewb 	}
10465818b42fSmatthewb 
10475818b42fSmatthewb 	if (spdk_unlikely(num_blocks * block_size > req->length)) {
10485818b42fSmatthewb 		SPDK_ERRLOG("Read NLB %" PRIu64 " * block size %" PRIu32 " > SGL length %" PRIu32 "\n",
10495818b42fSmatthewb 			    num_blocks, block_size, req->length);
1050686b9984SKonrad Sztyber 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
1051686b9984SKonrad Sztyber 		rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
1052686b9984SKonrad Sztyber 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
10535818b42fSmatthewb 	}
10545818b42fSmatthewb 
10555818b42fSmatthewb 	bool populate = (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) ? true : false;
10565818b42fSmatthewb 
1057686b9984SKonrad Sztyber 	rc = spdk_bdev_zcopy_start(desc, ch, req->iov, req->iovcnt, start_lba,
10580e09df57SKonrad Sztyber 				   num_blocks, populate, nvmf_bdev_ctrlr_zcopy_start_complete, req);
1059686b9984SKonrad Sztyber 	if (spdk_unlikely(rc != 0)) {
1060686b9984SKonrad Sztyber 		if (rc == -ENOMEM) {
1061686b9984SKonrad Sztyber 			nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
1062686b9984SKonrad Sztyber 			return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
1063686b9984SKonrad Sztyber 		}
1064686b9984SKonrad Sztyber 		rsp->status.sct = SPDK_NVME_SCT_GENERIC;
1065686b9984SKonrad Sztyber 		rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
1066686b9984SKonrad Sztyber 		return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
1067686b9984SKonrad Sztyber 	}
1068686b9984SKonrad Sztyber 
1069686b9984SKonrad Sztyber 	return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
10705818b42fSmatthewb }
10715818b42fSmatthewb 
10725818b42fSmatthewb static void
10730e09df57SKonrad Sztyber nvmf_bdev_ctrlr_zcopy_end_complete(struct spdk_bdev_io *bdev_io, bool success,
10745818b42fSmatthewb 				   void *cb_arg)
10755818b42fSmatthewb {
10765818b42fSmatthewb 	struct spdk_nvmf_request	*req = cb_arg;
10775818b42fSmatthewb 
10785818b42fSmatthewb 	if (spdk_unlikely(!success)) {
10795818b42fSmatthewb 		int                     sc = 0, sct = 0;
10805818b42fSmatthewb 		uint32_t                cdw0 = 0;
10815818b42fSmatthewb 		struct spdk_nvme_cpl    *response = &req->rsp->nvme_cpl;
10825818b42fSmatthewb 		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
10835818b42fSmatthewb 
10845818b42fSmatthewb 		response->cdw0 = cdw0;
10855818b42fSmatthewb 		response->status.sc = sc;
10865818b42fSmatthewb 		response->status.sct = sct;
10875818b42fSmatthewb 	}
10885818b42fSmatthewb 
10895818b42fSmatthewb 	spdk_bdev_free_io(bdev_io);
10905818b42fSmatthewb 	req->zcopy_bdev_io = NULL;
10915818b42fSmatthewb 	spdk_nvmf_request_complete(req);
10925818b42fSmatthewb }
10935818b42fSmatthewb 
10947a374fbcSKonrad Sztyber void
10950e09df57SKonrad Sztyber nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
10965818b42fSmatthewb {
1097686b9984SKonrad Sztyber 	int rc __attribute__((unused));
1098686b9984SKonrad Sztyber 
1099686b9984SKonrad Sztyber 	rc = spdk_bdev_zcopy_end(req->zcopy_bdev_io, commit, nvmf_bdev_ctrlr_zcopy_end_complete, req);
1100686b9984SKonrad Sztyber 
1101686b9984SKonrad Sztyber 	/* The only way spdk_bdev_zcopy_end() can fail is if we pass a bdev_io type that isn't ZCOPY */
1102686b9984SKonrad Sztyber 	assert(rc == 0);
11035818b42fSmatthewb }
1104