xref: /spdk/lib/nvmf/transport.c (revision aac967c0d312ef9076b316afd934926f687e5336)
1488570ebSJim Harris /*   SPDX-License-Identifier: BSD-3-Clause
2a6dbe372Spaul luse  *   Copyright (C) 2016 Intel Corporation. All rights reserved.
36263457cSAlexey Marchuk  *   Copyright (c) 2018-2019, 2021 Mellanox Technologies LTD. All rights reserved.
44045068aSAlexey Marchuk  *   Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
521c450e1SDaniel Verkamp  */
621c450e1SDaniel Verkamp 
7b961d9ccSBen Walker #include "spdk/stdinc.h"
821c450e1SDaniel Verkamp 
97c6ca978SBen Walker #include "nvmf_internal.h"
10b961d9ccSBen Walker #include "transport.h"
1121c450e1SDaniel Verkamp 
12c4fee1e9SPawel Wodkowski #include "spdk/config.h"
1321c450e1SDaniel Verkamp #include "spdk/log.h"
141e6ffa03SBen Walker #include "spdk/nvmf.h"
15cc353f0eSBen Walker #include "spdk/nvmf_transport.h"
1621c450e1SDaniel Verkamp #include "spdk/queue.h"
17b58a5d73SDaniel Verkamp #include "spdk/util.h"
18a266b6e4SKonrad Sztyber #include "spdk_internal/usdt.h"
1921c450e1SDaniel Verkamp 
/* Default association timeout (in ms) applied when the user does not supply one. */
#define NVMF_TRANSPORT_DEFAULT_ASSOCIATION_TIMEOUT_IN_MS 120000

/* Node type for the global registry of transport implementations; each entry
 * owns a copy of the ops structure passed to spdk_nvmf_transport_register(). */
struct nvmf_transport_ops_list_element {
	struct spdk_nvmf_transport_ops			ops;
	TAILQ_ENTRY(nvmf_transport_ops_list_element)	link;
};

/* Global list of every transport ops structure registered with this process. */
TAILQ_HEAD(nvmf_transport_ops_list, nvmf_transport_ops_list_element)
g_spdk_nvmf_transport_ops = TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_transport_ops);
2921c450e1SDaniel Verkamp 
308e808490SJohn Barnard static inline const struct spdk_nvmf_transport_ops *
316dec2087SSeth Howell nvmf_get_transport_ops(const char *transport_name)
3221c450e1SDaniel Verkamp {
33f038354eSSeth Howell 	struct nvmf_transport_ops_list_element *ops;
34f038354eSSeth Howell 	TAILQ_FOREACH(ops, &g_spdk_nvmf_transport_ops, link) {
35f038354eSSeth Howell 		if (strcasecmp(transport_name, ops->ops.name) == 0) {
36f038354eSSeth Howell 			return &ops->ops;
3721c450e1SDaniel Verkamp 		}
3821c450e1SDaniel Verkamp 	}
398e808490SJohn Barnard 	return NULL;
408e808490SJohn Barnard }
4121c450e1SDaniel Verkamp 
42f038354eSSeth Howell void
43f038354eSSeth Howell spdk_nvmf_transport_register(const struct spdk_nvmf_transport_ops *ops)
44f038354eSSeth Howell {
45f038354eSSeth Howell 	struct nvmf_transport_ops_list_element *new_ops;
46f038354eSSeth Howell 
476dec2087SSeth Howell 	if (nvmf_get_transport_ops(ops->name) != NULL) {
48f038354eSSeth Howell 		SPDK_ERRLOG("Double registering nvmf transport type %s.\n", ops->name);
49f038354eSSeth Howell 		assert(false);
50f038354eSSeth Howell 		return;
51f038354eSSeth Howell 	}
52f038354eSSeth Howell 
53f038354eSSeth Howell 	new_ops = calloc(1, sizeof(*new_ops));
54f038354eSSeth Howell 	if (new_ops == NULL) {
55f038354eSSeth Howell 		SPDK_ERRLOG("Unable to allocate memory to register new transport type %s.\n", ops->name);
56f038354eSSeth Howell 		assert(false);
57f038354eSSeth Howell 		return;
58f038354eSSeth Howell 	}
59f038354eSSeth Howell 
60f038354eSSeth Howell 	new_ops->ops = *ops;
61f038354eSSeth Howell 
62f038354eSSeth Howell 	TAILQ_INSERT_TAIL(&g_spdk_nvmf_transport_ops, new_ops, link);
63f038354eSSeth Howell }
64f038354eSSeth Howell 
/* Accessor for the transport's effective (post-validation) options. */
const struct spdk_nvmf_transport_opts *
spdk_nvmf_get_transport_opts(struct spdk_nvmf_transport *transport)
{
	return &transport->opts;
}
70433a1e7bSSeth Howell 
71220bcf7dSJacek Kalwas void
72220bcf7dSJacek Kalwas nvmf_transport_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w,
73220bcf7dSJacek Kalwas 			 bool named)
74220bcf7dSJacek Kalwas {
75220bcf7dSJacek Kalwas 	const struct spdk_nvmf_transport_opts *opts = spdk_nvmf_get_transport_opts(transport);
76220bcf7dSJacek Kalwas 
77220bcf7dSJacek Kalwas 	named ? spdk_json_write_named_object_begin(w, "params") : spdk_json_write_object_begin(w);
78220bcf7dSJacek Kalwas 
79220bcf7dSJacek Kalwas 	spdk_json_write_named_string(w, "trtype", spdk_nvmf_get_transport_name(transport));
80220bcf7dSJacek Kalwas 	spdk_json_write_named_uint32(w, "max_queue_depth", opts->max_queue_depth);
81220bcf7dSJacek Kalwas 	spdk_json_write_named_uint32(w, "max_io_qpairs_per_ctrlr", opts->max_qpairs_per_ctrlr - 1);
82220bcf7dSJacek Kalwas 	spdk_json_write_named_uint32(w, "in_capsule_data_size", opts->in_capsule_data_size);
83220bcf7dSJacek Kalwas 	spdk_json_write_named_uint32(w, "max_io_size", opts->max_io_size);
84220bcf7dSJacek Kalwas 	spdk_json_write_named_uint32(w, "io_unit_size", opts->io_unit_size);
85220bcf7dSJacek Kalwas 	spdk_json_write_named_uint32(w, "max_aq_depth", opts->max_aq_depth);
86220bcf7dSJacek Kalwas 	spdk_json_write_named_uint32(w, "num_shared_buffers", opts->num_shared_buffers);
87220bcf7dSJacek Kalwas 	spdk_json_write_named_uint32(w, "buf_cache_size", opts->buf_cache_size);
88220bcf7dSJacek Kalwas 	spdk_json_write_named_bool(w, "dif_insert_or_strip", opts->dif_insert_or_strip);
89aa1d0398SKonrad Sztyber 	spdk_json_write_named_bool(w, "zcopy", opts->zcopy);
90220bcf7dSJacek Kalwas 
91220bcf7dSJacek Kalwas 	if (transport->ops->dump_opts) {
92220bcf7dSJacek Kalwas 		transport->ops->dump_opts(transport, w);
93220bcf7dSJacek Kalwas 	}
94220bcf7dSJacek Kalwas 
95220bcf7dSJacek Kalwas 	spdk_json_write_named_uint32(w, "abort_timeout_sec", opts->abort_timeout_sec);
9679606beeSKonrad Sztyber 	spdk_json_write_named_uint32(w, "ack_timeout", opts->ack_timeout);
977dab13c0SAlexey Marchuk 	spdk_json_write_named_uint32(w, "data_wr_pool_size", opts->data_wr_pool_size);
98220bcf7dSJacek Kalwas 	spdk_json_write_object_end(w);
99220bcf7dSJacek Kalwas }
100220bcf7dSJacek Kalwas 
1011f094a94SJacek Kalwas void
102f5260201SWojciech Panfil nvmf_transport_listen_dump_trid(const struct spdk_nvme_transport_id *trid,
103f5260201SWojciech Panfil 				struct spdk_json_write_ctx *w)
1041f094a94SJacek Kalwas {
1051f094a94SJacek Kalwas 	const char *adrfam = spdk_nvme_transport_id_adrfam_str(trid->adrfam);
1061f094a94SJacek Kalwas 
1071f094a94SJacek Kalwas 	spdk_json_write_named_string(w, "trtype", trid->trstring);
1081f094a94SJacek Kalwas 	spdk_json_write_named_string(w, "adrfam", adrfam ? adrfam : "unknown");
1091f094a94SJacek Kalwas 	spdk_json_write_named_string(w, "traddr", trid->traddr);
1101f094a94SJacek Kalwas 	spdk_json_write_named_string(w, "trsvcid", trid->trsvcid);
1111f094a94SJacek Kalwas }
1121f094a94SJacek Kalwas 
/* Accessor for the transport's enumerated type (e.g. RDMA, TCP). */
spdk_nvme_transport_type_t
spdk_nvmf_get_transport_type(struct spdk_nvmf_transport *transport)
{
	return transport->ops->type;
}
118433a1e7bSSeth Howell 
/* Accessor for the transport's registered name string. */
const char *
spdk_nvmf_get_transport_name(struct spdk_nvmf_transport *transport)
{
	return transport->ops->name;
}
1240651753cSJacek Kalwas 
1258dd1cd21SBen Walker static void
1268dd1cd21SBen Walker nvmf_transport_opts_copy(struct spdk_nvmf_transport_opts *opts,
1273b16c6ddSZiye Yang 			 struct spdk_nvmf_transport_opts *opts_src,
1283b16c6ddSZiye Yang 			 size_t opts_size)
1293b16c6ddSZiye Yang {
1303b16c6ddSZiye Yang 	assert(opts);
1313b16c6ddSZiye Yang 	assert(opts_src);
1323b16c6ddSZiye Yang 
1333b16c6ddSZiye Yang 	opts->opts_size = opts_size;
1343b16c6ddSZiye Yang 
1353b16c6ddSZiye Yang #define SET_FIELD(field) \
1363b16c6ddSZiye Yang 	if (offsetof(struct spdk_nvmf_transport_opts, field) + sizeof(opts->field) <= opts_size) { \
1373b16c6ddSZiye Yang 		opts->field = opts_src->field; \
1383b16c6ddSZiye Yang 	} \
1393b16c6ddSZiye Yang 
1403b16c6ddSZiye Yang 	SET_FIELD(max_queue_depth);
1413b16c6ddSZiye Yang 	SET_FIELD(max_qpairs_per_ctrlr);
1423b16c6ddSZiye Yang 	SET_FIELD(in_capsule_data_size);
1433b16c6ddSZiye Yang 	SET_FIELD(max_io_size);
1443b16c6ddSZiye Yang 	SET_FIELD(io_unit_size);
1453b16c6ddSZiye Yang 	SET_FIELD(max_aq_depth);
1463b16c6ddSZiye Yang 	SET_FIELD(buf_cache_size);
1473b16c6ddSZiye Yang 	SET_FIELD(num_shared_buffers);
1483b16c6ddSZiye Yang 	SET_FIELD(dif_insert_or_strip);
1493b16c6ddSZiye Yang 	SET_FIELD(abort_timeout_sec);
1503b16c6ddSZiye Yang 	SET_FIELD(association_timeout);
1513b16c6ddSZiye Yang 	SET_FIELD(transport_specific);
15243022da3SJacek Kalwas 	SET_FIELD(acceptor_poll_rate);
153aa1d0398SKonrad Sztyber 	SET_FIELD(zcopy);
15479606beeSKonrad Sztyber 	SET_FIELD(ack_timeout);
1557dab13c0SAlexey Marchuk 	SET_FIELD(data_wr_pool_size);
1563b16c6ddSZiye Yang 
1573b16c6ddSZiye Yang 	/* Do not remove this statement, you should always update this statement when you adding a new field,
1583b16c6ddSZiye Yang 	 * and do not forget to add the SET_FIELD statement for your added field. */
1597dab13c0SAlexey Marchuk 	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_transport_opts) == 72, "Incorrect size");
1603b16c6ddSZiye Yang 
1613b16c6ddSZiye Yang #undef SET_FIELD
1623b16c6ddSZiye Yang #undef FILED_CHECK
1633b16c6ddSZiye Yang }
1643b16c6ddSZiye Yang 
/* Per-call context for transport creation; freed by
 * nvmf_transport_create_async_done() once the user callback has run. */
struct nvmf_transport_create_ctx {
	const struct spdk_nvmf_transport_ops *ops;	/* resolved transport implementation */
	struct spdk_nvmf_transport_opts opts;		/* validated, size-copied options */
	void *cb_arg;					/* opaque argument for cb_fn */
	spdk_nvmf_transport_create_done_cb cb_fn;	/* completion callback (sync and async paths) */
};
17196073478SJacek Kalwas 
172a68e0930SKalwas, Jacek static bool
173a68e0930SKalwas, Jacek nvmf_transport_use_iobuf(struct spdk_nvmf_transport *transport)
174a68e0930SKalwas, Jacek {
175a68e0930SKalwas, Jacek 	return transport->opts.num_shared_buffers || transport->opts.buf_cache_size;
176a68e0930SKalwas, Jacek }
177a68e0930SKalwas, Jacek 
17896073478SJacek Kalwas static void
17996073478SJacek Kalwas nvmf_transport_create_async_done(void *cb_arg, struct spdk_nvmf_transport *transport)
1808e808490SJohn Barnard {
18196073478SJacek Kalwas 	struct nvmf_transport_create_ctx *ctx = cb_arg;
182e28605f4SSeth Howell 	int chars_written;
1833b16c6ddSZiye Yang 
18431d033f9SBen Walker 	if (!transport) {
18596073478SJacek Kalwas 		SPDK_ERRLOG("Failed to create transport.\n");
18696073478SJacek Kalwas 		goto err;
18731d033f9SBen Walker 	}
18831d033f9SBen Walker 
189b17919d8SJacek Kalwas 	pthread_mutex_init(&transport->mutex, NULL);
1906d8f1fc6SJacek Kalwas 	TAILQ_INIT(&transport->listeners);
19196073478SJacek Kalwas 	transport->ops = ctx->ops;
19296073478SJacek Kalwas 	transport->opts = ctx->opts;
193ef8bcce5SJacek Kalwas 	chars_written = snprintf(transport->iobuf_name, MAX_MEMPOOL_NAME_LENGTH, "%s_%s", "nvmf",
194ef8bcce5SJacek Kalwas 				 transport->ops->name);
195e28605f4SSeth Howell 	if (chars_written < 0) {
196e28605f4SSeth Howell 		SPDK_ERRLOG("Unable to generate transport data buffer pool name.\n");
19796073478SJacek Kalwas 		goto err;
198e28605f4SSeth Howell 	}
199e28605f4SSeth Howell 
200a68e0930SKalwas, Jacek 	if (nvmf_transport_use_iobuf(transport)) {
20173ba05f7SBen Walker 		spdk_iobuf_register_module(transport->iobuf_name);
202c138dfd3SChangpeng Liu 	}
2038e808490SJohn Barnard 
20496073478SJacek Kalwas 	ctx->cb_fn(ctx->cb_arg, transport);
20596073478SJacek Kalwas 	free(ctx);
20696073478SJacek Kalwas 	return;
20796073478SJacek Kalwas 
20896073478SJacek Kalwas err:
20996073478SJacek Kalwas 	if (transport) {
21096073478SJacek Kalwas 		transport->ops->destroy(transport, NULL, NULL);
21196073478SJacek Kalwas 	}
21296073478SJacek Kalwas 
21396073478SJacek Kalwas 	ctx->cb_fn(ctx->cb_arg, NULL);
21496073478SJacek Kalwas 	free(ctx);
21596073478SJacek Kalwas }
21696073478SJacek Kalwas 
21796073478SJacek Kalwas static void
21896073478SJacek Kalwas _nvmf_transport_create_done(void *ctx)
21996073478SJacek Kalwas {
22096073478SJacek Kalwas 	struct nvmf_transport_create_ctx *_ctx = (struct nvmf_transport_create_ctx *)ctx;
22196073478SJacek Kalwas 
22296073478SJacek Kalwas 	nvmf_transport_create_async_done(_ctx, _ctx->ops->create(&_ctx->opts));
22396073478SJacek Kalwas }
22496073478SJacek Kalwas 
22596073478SJacek Kalwas static int
22696073478SJacek Kalwas nvmf_transport_create(const char *transport_name, struct spdk_nvmf_transport_opts *opts,
22796073478SJacek Kalwas 		      spdk_nvmf_transport_create_done_cb cb_fn, void *cb_arg, bool sync)
22896073478SJacek Kalwas {
22996073478SJacek Kalwas 	struct nvmf_transport_create_ctx *ctx;
23073ba05f7SBen Walker 	struct spdk_iobuf_opts opts_iobuf = {};
23196073478SJacek Kalwas 	int rc;
23273ba05f7SBen Walker 	uint64_t count;
23396073478SJacek Kalwas 
23496073478SJacek Kalwas 	ctx = calloc(1, sizeof(*ctx));
23596073478SJacek Kalwas 	if (!ctx) {
23696073478SJacek Kalwas 		return -ENOMEM;
23796073478SJacek Kalwas 	}
23896073478SJacek Kalwas 
23996073478SJacek Kalwas 	if (!opts) {
24096073478SJacek Kalwas 		SPDK_ERRLOG("opts should not be NULL\n");
24196073478SJacek Kalwas 		goto err;
24296073478SJacek Kalwas 	}
24396073478SJacek Kalwas 
24496073478SJacek Kalwas 	if (!opts->opts_size) {
24596073478SJacek Kalwas 		SPDK_ERRLOG("The opts_size in opts structure should not be zero\n");
24696073478SJacek Kalwas 		goto err;
24796073478SJacek Kalwas 	}
24896073478SJacek Kalwas 
24996073478SJacek Kalwas 	ctx->ops = nvmf_get_transport_ops(transport_name);
25096073478SJacek Kalwas 	if (!ctx->ops) {
25196073478SJacek Kalwas 		SPDK_ERRLOG("Transport type '%s' unavailable.\n", transport_name);
25296073478SJacek Kalwas 		goto err;
25396073478SJacek Kalwas 	}
25496073478SJacek Kalwas 
25596073478SJacek Kalwas 	nvmf_transport_opts_copy(&ctx->opts, opts, opts->opts_size);
25696073478SJacek Kalwas 	if (ctx->opts.max_io_size != 0 && (!spdk_u32_is_pow2(ctx->opts.max_io_size) ||
25796073478SJacek Kalwas 					   ctx->opts.max_io_size < 8192)) {
25896073478SJacek Kalwas 		SPDK_ERRLOG("max_io_size %u must be a power of 2 and be greater than or equal 8KB\n",
25996073478SJacek Kalwas 			    ctx->opts.max_io_size);
26096073478SJacek Kalwas 		goto err;
26196073478SJacek Kalwas 	}
26296073478SJacek Kalwas 
26396073478SJacek Kalwas 	if (ctx->opts.max_aq_depth < SPDK_NVMF_MIN_ADMIN_MAX_SQ_SIZE) {
26496073478SJacek Kalwas 		SPDK_ERRLOG("max_aq_depth %u is less than minimum defined by NVMf spec, use min value\n",
26596073478SJacek Kalwas 			    ctx->opts.max_aq_depth);
26696073478SJacek Kalwas 		ctx->opts.max_aq_depth = SPDK_NVMF_MIN_ADMIN_MAX_SQ_SIZE;
26796073478SJacek Kalwas 	}
26896073478SJacek Kalwas 
269194983eeSJohn Levon 	spdk_iobuf_get_opts(&opts_iobuf, sizeof(opts_iobuf));
270604c1b6fSKrzysztof Karas 	if (ctx->opts.io_unit_size == 0) {
271604c1b6fSKrzysztof Karas 		SPDK_ERRLOG("io_unit_size cannot be 0\n");
272604c1b6fSKrzysztof Karas 		goto err;
273604c1b6fSKrzysztof Karas 	}
27473ba05f7SBen Walker 	if (ctx->opts.io_unit_size > opts_iobuf.large_bufsize) {
27573ba05f7SBen Walker 		SPDK_ERRLOG("io_unit_size %u is larger than iobuf pool large buffer size %d\n",
27673ba05f7SBen Walker 			    ctx->opts.io_unit_size, opts_iobuf.large_bufsize);
27773ba05f7SBen Walker 		goto err;
27873ba05f7SBen Walker 	}
27973ba05f7SBen Walker 
28073ba05f7SBen Walker 	if (ctx->opts.io_unit_size <= opts_iobuf.small_bufsize) {
28173ba05f7SBen Walker 		/* We'll be using the small buffer pool only */
28273ba05f7SBen Walker 		count = opts_iobuf.small_pool_count;
28373ba05f7SBen Walker 	} else {
28473ba05f7SBen Walker 		count = spdk_min(opts_iobuf.small_pool_count, opts_iobuf.large_pool_count);
28573ba05f7SBen Walker 	}
28673ba05f7SBen Walker 
28773ba05f7SBen Walker 	if (ctx->opts.num_shared_buffers > count) {
28873ba05f7SBen Walker 		SPDK_WARNLOG("The num_shared_buffers value (%u) is larger than the available iobuf"
28973ba05f7SBen Walker 			     " pool size (%lu). Please increase the iobuf pool sizes.\n",
29073ba05f7SBen Walker 			     ctx->opts.num_shared_buffers, count);
29173ba05f7SBen Walker 	}
29273ba05f7SBen Walker 
29396073478SJacek Kalwas 	ctx->cb_fn = cb_fn;
29496073478SJacek Kalwas 	ctx->cb_arg = cb_arg;
29596073478SJacek Kalwas 
29696073478SJacek Kalwas 	/* Prioritize sync create operation. */
29796073478SJacek Kalwas 	if (ctx->ops->create) {
29896073478SJacek Kalwas 		if (sync) {
29996073478SJacek Kalwas 			_nvmf_transport_create_done(ctx);
30096073478SJacek Kalwas 			return 0;
30196073478SJacek Kalwas 		}
30296073478SJacek Kalwas 
30396073478SJacek Kalwas 		rc = spdk_thread_send_msg(spdk_get_thread(), _nvmf_transport_create_done, ctx);
30496073478SJacek Kalwas 		if (rc) {
30596073478SJacek Kalwas 			goto err;
30696073478SJacek Kalwas 		}
30796073478SJacek Kalwas 
30896073478SJacek Kalwas 		return 0;
30996073478SJacek Kalwas 	}
31096073478SJacek Kalwas 
31196073478SJacek Kalwas 	assert(ctx->ops->create_async);
31296073478SJacek Kalwas 	rc = ctx->ops->create_async(&ctx->opts, nvmf_transport_create_async_done, ctx);
31396073478SJacek Kalwas 	if (rc) {
31496073478SJacek Kalwas 		SPDK_ERRLOG("Unable to create new transport of type %s\n", transport_name);
31596073478SJacek Kalwas 		goto err;
31696073478SJacek Kalwas 	}
31796073478SJacek Kalwas 
31896073478SJacek Kalwas 	return 0;
31996073478SJacek Kalwas err:
32096073478SJacek Kalwas 	free(ctx);
32196073478SJacek Kalwas 	return -1;
32296073478SJacek Kalwas }
32396073478SJacek Kalwas 
32496073478SJacek Kalwas int
32596073478SJacek Kalwas spdk_nvmf_transport_create_async(const char *transport_name, struct spdk_nvmf_transport_opts *opts,
32696073478SJacek Kalwas 				 spdk_nvmf_transport_create_done_cb cb_fn, void *cb_arg)
32796073478SJacek Kalwas {
32896073478SJacek Kalwas 	return nvmf_transport_create(transport_name, opts, cb_fn, cb_arg, false);
32996073478SJacek Kalwas }
33096073478SJacek Kalwas 
/* Completion callback for the synchronous create API: publish the result
 * into the caller's stack variable. */
static void
nvmf_transport_create_sync_done(void *cb_arg, struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_transport **out = cb_arg;

	*out = transport;
}
33896073478SJacek Kalwas 
33996073478SJacek Kalwas struct spdk_nvmf_transport *
34096073478SJacek Kalwas spdk_nvmf_transport_create(const char *transport_name, struct spdk_nvmf_transport_opts *opts)
34196073478SJacek Kalwas {
34296073478SJacek Kalwas 	struct spdk_nvmf_transport *transport = NULL;
34396073478SJacek Kalwas 
34496073478SJacek Kalwas 	/* Current implementation supports synchronous version of create operation only. */
34596073478SJacek Kalwas 	assert(nvmf_get_transport_ops(transport_name) && nvmf_get_transport_ops(transport_name)->create);
34696073478SJacek Kalwas 
34796073478SJacek Kalwas 	nvmf_transport_create(transport_name, opts, nvmf_transport_create_sync_done, &transport, true);
34831d033f9SBen Walker 	return transport;
34921c450e1SDaniel Verkamp }
35021c450e1SDaniel Verkamp 
/* Iterator start: first transport attached to the target, or NULL. */
struct spdk_nvmf_transport *
spdk_nvmf_transport_get_first(struct spdk_nvmf_tgt *tgt)
{
	return TAILQ_FIRST(&tgt->transports);
}
356433a1e7bSSeth Howell 
/* Iterator step: next transport in the target's list, or NULL at the end. */
struct spdk_nvmf_transport *
spdk_nvmf_transport_get_next(struct spdk_nvmf_transport *transport)
{
	return TAILQ_NEXT(transport, link);
}
362433a1e7bSSeth Howell 
36321c450e1SDaniel Verkamp int
3640d98a949SNaresh Gottumukkala spdk_nvmf_transport_destroy(struct spdk_nvmf_transport *transport,
3650d98a949SNaresh Gottumukkala 			    spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
36621c450e1SDaniel Verkamp {
36729d94b7fSAlexey Marchuk 	struct spdk_nvmf_listener *listener, *listener_tmp;
36829d94b7fSAlexey Marchuk 
36929d94b7fSAlexey Marchuk 	TAILQ_FOREACH_SAFE(listener, &transport->listeners, link, listener_tmp) {
37029d94b7fSAlexey Marchuk 		TAILQ_REMOVE(&transport->listeners, listener, link);
37129d94b7fSAlexey Marchuk 		transport->ops->stop_listen(transport, &listener->trid);
37229d94b7fSAlexey Marchuk 		free(listener);
37329d94b7fSAlexey Marchuk 	}
37429d94b7fSAlexey Marchuk 
375a68e0930SKalwas, Jacek 	if (nvmf_transport_use_iobuf(transport)) {
37673ba05f7SBen Walker 		spdk_iobuf_unregister_module(transport->iobuf_name);
37773ba05f7SBen Walker 	}
37873ba05f7SBen Walker 
379b17919d8SJacek Kalwas 	pthread_mutex_destroy(&transport->mutex);
3800d98a949SNaresh Gottumukkala 	return transport->ops->destroy(transport, cb_fn, cb_arg);
38121c450e1SDaniel Verkamp }
38221c450e1SDaniel Verkamp 
3836d8f1fc6SJacek Kalwas struct spdk_nvmf_listener *
3849cb21ad6SSeth Howell nvmf_transport_find_listener(struct spdk_nvmf_transport *transport,
3856d8f1fc6SJacek Kalwas 			     const struct spdk_nvme_transport_id *trid)
3866d8f1fc6SJacek Kalwas {
3876d8f1fc6SJacek Kalwas 	struct spdk_nvmf_listener *listener;
3886d8f1fc6SJacek Kalwas 
3896d8f1fc6SJacek Kalwas 	TAILQ_FOREACH(listener, &transport->listeners, link) {
3906d8f1fc6SJacek Kalwas 		if (spdk_nvme_transport_id_compare(&listener->trid, trid) == 0) {
3916d8f1fc6SJacek Kalwas 			return listener;
3926d8f1fc6SJacek Kalwas 		}
3936d8f1fc6SJacek Kalwas 	}
3946d8f1fc6SJacek Kalwas 
3956d8f1fc6SJacek Kalwas 	return NULL;
3966d8f1fc6SJacek Kalwas }
3976d8f1fc6SJacek Kalwas 
3987e3b9f25SBen Walker int
3997e3b9f25SBen Walker spdk_nvmf_transport_listen(struct spdk_nvmf_transport *transport,
40087a062e6SJacek Kalwas 			   const struct spdk_nvme_transport_id *trid, struct spdk_nvmf_listen_opts *opts)
4017e3b9f25SBen Walker {
40233d5eee1SJacek Kalwas 	struct spdk_nvmf_listener *listener;
403fdd5151eSyidong0635 	int rc;
40433d5eee1SJacek Kalwas 
4059cb21ad6SSeth Howell 	listener = nvmf_transport_find_listener(transport, trid);
4066d8f1fc6SJacek Kalwas 	if (!listener) {
4076d8f1fc6SJacek Kalwas 		listener = calloc(1, sizeof(*listener));
4086d8f1fc6SJacek Kalwas 		if (!listener) {
4096d8f1fc6SJacek Kalwas 			return -ENOMEM;
4106d8f1fc6SJacek Kalwas 		}
4116d8f1fc6SJacek Kalwas 
4126d8f1fc6SJacek Kalwas 		listener->ref = 1;
4136d8f1fc6SJacek Kalwas 		listener->trid = *trid;
414248c547dSKarl Bonde Torp 		listener->sock_impl = opts->sock_impl;
4156d8f1fc6SJacek Kalwas 		TAILQ_INSERT_TAIL(&transport->listeners, listener, link);
416b17919d8SJacek Kalwas 		pthread_mutex_lock(&transport->mutex);
41787a062e6SJacek Kalwas 		rc = transport->ops->listen(transport, &listener->trid, opts);
418b17919d8SJacek Kalwas 		pthread_mutex_unlock(&transport->mutex);
419fdd5151eSyidong0635 		if (rc != 0) {
420fdd5151eSyidong0635 			TAILQ_REMOVE(&transport->listeners, listener, link);
421fdd5151eSyidong0635 			free(listener);
422fdd5151eSyidong0635 		}
423fdd5151eSyidong0635 		return rc;
4246d8f1fc6SJacek Kalwas 	}
4256d8f1fc6SJacek Kalwas 
426248c547dSKarl Bonde Torp 	if (opts->sock_impl && strncmp(opts->sock_impl, listener->sock_impl, strlen(listener->sock_impl))) {
427248c547dSKarl Bonde Torp 		SPDK_ERRLOG("opts->sock_impl: '%s' doesn't match listener->sock_impl: '%s'\n", opts->sock_impl,
428248c547dSKarl Bonde Torp 			    listener->sock_impl);
429248c547dSKarl Bonde Torp 		return -EINVAL;
430248c547dSKarl Bonde Torp 	}
431248c547dSKarl Bonde Torp 
4326d8f1fc6SJacek Kalwas 	++listener->ref;
4336d8f1fc6SJacek Kalwas 
4346d8f1fc6SJacek Kalwas 	return 0;
4357e3b9f25SBen Walker }
4367e3b9f25SBen Walker 
4377e3b9f25SBen Walker int
4387e3b9f25SBen Walker spdk_nvmf_transport_stop_listen(struct spdk_nvmf_transport *transport,
4397e3b9f25SBen Walker 				const struct spdk_nvme_transport_id *trid)
4407e3b9f25SBen Walker {
44133d5eee1SJacek Kalwas 	struct spdk_nvmf_listener *listener;
44233d5eee1SJacek Kalwas 
4439cb21ad6SSeth Howell 	listener = nvmf_transport_find_listener(transport, trid);
4446d8f1fc6SJacek Kalwas 	if (!listener) {
4456d8f1fc6SJacek Kalwas 		return -ENOENT;
4466d8f1fc6SJacek Kalwas 	}
4476d8f1fc6SJacek Kalwas 
4486d8f1fc6SJacek Kalwas 	if (--listener->ref == 0) {
4496d8f1fc6SJacek Kalwas 		TAILQ_REMOVE(&transport->listeners, listener, link);
450b17919d8SJacek Kalwas 		pthread_mutex_lock(&transport->mutex);
4516d8f1fc6SJacek Kalwas 		transport->ops->stop_listen(transport, trid);
452b17919d8SJacek Kalwas 		pthread_mutex_unlock(&transport->mutex);
4536d8f1fc6SJacek Kalwas 		free(listener);
4546d8f1fc6SJacek Kalwas 	}
4556d8f1fc6SJacek Kalwas 
4566d8f1fc6SJacek Kalwas 	return 0;
4577e3b9f25SBen Walker }
4587e3b9f25SBen Walker 
/* Context for the asynchronous stop-listen flow: carried through the
 * per-poll-group channel iteration and freed in nvmf_stop_listen_fini(). */
struct nvmf_stop_listen_ctx {
	struct spdk_nvmf_transport *transport;		/* transport whose listener is being removed */
	struct spdk_nvme_transport_id trid;		/* listen address to stop */
	struct spdk_nvmf_subsystem *subsystem;		/* restrict disconnects to this subsystem; NULL = all */
	spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;	/* user completion callback (may be NULL) */
	void *cb_arg;					/* opaque argument for cb_fn */
};
46649ee92a6SSeth Howell 
46749ee92a6SSeth Howell static void
46849ee92a6SSeth Howell nvmf_stop_listen_fini(struct spdk_io_channel_iter *i, int status)
46949ee92a6SSeth Howell {
47049ee92a6SSeth Howell 	struct nvmf_stop_listen_ctx *ctx;
47149ee92a6SSeth Howell 	struct spdk_nvmf_transport *transport;
47249ee92a6SSeth Howell 	int rc = status;
47349ee92a6SSeth Howell 
47449ee92a6SSeth Howell 	ctx = spdk_io_channel_iter_get_ctx(i);
47549ee92a6SSeth Howell 	transport = ctx->transport;
47649ee92a6SSeth Howell 	assert(transport != NULL);
47749ee92a6SSeth Howell 
47849ee92a6SSeth Howell 	rc = spdk_nvmf_transport_stop_listen(transport, &ctx->trid);
47949ee92a6SSeth Howell 	if (rc) {
48049ee92a6SSeth Howell 		SPDK_ERRLOG("Failed to stop listening on address '%s'\n", ctx->trid.traddr);
48149ee92a6SSeth Howell 	}
48249ee92a6SSeth Howell 
48349ee92a6SSeth Howell 	if (ctx->cb_fn) {
48449ee92a6SSeth Howell 		ctx->cb_fn(ctx->cb_arg, rc);
48549ee92a6SSeth Howell 	}
48649ee92a6SSeth Howell 	free(ctx);
48749ee92a6SSeth Howell }
48849ee92a6SSeth Howell 
static void nvmf_stop_listen_disconnect_qpairs(struct spdk_io_channel_iter *i);

/* Thread-message trampoline: re-enters the disconnect scan for the current
 * channel so previously-found qpairs get a chance to finish disconnecting. */
static void
nvmf_stop_listen_disconnect_qpairs_msg(void *ctx)
{
	nvmf_stop_listen_disconnect_qpairs((struct spdk_io_channel_iter *)ctx);
}
4964045068aSAlexey Marchuk 
/* Per-poll-group step of the async stop-listen flow: disconnect every qpair
 * that was accepted on the listen trid being removed (optionally restricted
 * to one subsystem).  Disconnection is asynchronous, so as long as any
 * matching qpair was seen we re-queue ourselves via a thread message and
 * rescan; only when a scan finds nothing do we advance the channel iterator. */
static void
nvmf_stop_listen_disconnect_qpairs(struct spdk_io_channel_iter *i)
{
	struct nvmf_stop_listen_ctx *ctx;
	struct spdk_nvmf_poll_group *group;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_qpair *qpair, *tmp_qpair;
	struct spdk_nvme_transport_id tmp_trid;
	bool qpair_found = false;

	ctx = spdk_io_channel_iter_get_ctx(i);
	ch = spdk_io_channel_iter_get_channel(i);
	group = spdk_io_channel_get_ctx(ch);

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, tmp_qpair) {
		/* Qpairs with no resolvable listen trid cannot match; skip them. */
		if (spdk_nvmf_qpair_get_listen_trid(qpair, &tmp_trid)) {
			continue;
		}

		/* Skip qpairs that don't match the listen trid and subsystem pointer.  If
		 * the ctx->subsystem is NULL, it means disconnect all qpairs that match
		 * the listen trid. */
		if (!spdk_nvme_transport_id_compare(&ctx->trid, &tmp_trid)) {
			if (ctx->subsystem == NULL ||
			    (qpair->ctrlr != NULL && ctx->subsystem == qpair->ctrlr->subsys)) {
				spdk_nvmf_qpair_disconnect(qpair);
				qpair_found = true;
			}
		}
	}
	if (qpair_found) {
		/* Disconnects complete asynchronously; rescan this group later. */
		spdk_thread_send_msg(spdk_get_thread(), nvmf_stop_listen_disconnect_qpairs_msg, i);
		return;
	}

	spdk_for_each_channel_continue(i, 0);
}
53449ee92a6SSeth Howell 
53549ee92a6SSeth Howell int
53649ee92a6SSeth Howell spdk_nvmf_transport_stop_listen_async(struct spdk_nvmf_transport *transport,
53749ee92a6SSeth Howell 				      const struct spdk_nvme_transport_id *trid,
53879727986SJim Harris 				      struct spdk_nvmf_subsystem *subsystem,
53949ee92a6SSeth Howell 				      spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
54049ee92a6SSeth Howell 				      void *cb_arg)
54149ee92a6SSeth Howell {
54249ee92a6SSeth Howell 	struct nvmf_stop_listen_ctx *ctx;
54349ee92a6SSeth Howell 
544baf250e5SJim Harris 	if (trid->subnqn[0] != '\0') {
545baf250e5SJim Harris 		SPDK_ERRLOG("subnqn should be empty, use subsystem pointer instead\n");
546baf250e5SJim Harris 		return -EINVAL;
547baf250e5SJim Harris 	}
548baf250e5SJim Harris 
54949ee92a6SSeth Howell 	ctx = calloc(1, sizeof(struct nvmf_stop_listen_ctx));
55049ee92a6SSeth Howell 	if (ctx == NULL) {
55149ee92a6SSeth Howell 		return -ENOMEM;
55249ee92a6SSeth Howell 	}
55349ee92a6SSeth Howell 
55449ee92a6SSeth Howell 	ctx->trid = *trid;
55579727986SJim Harris 	ctx->subsystem = subsystem;
55649ee92a6SSeth Howell 	ctx->transport = transport;
55749ee92a6SSeth Howell 	ctx->cb_fn = cb_fn;
55849ee92a6SSeth Howell 	ctx->cb_arg = cb_arg;
55949ee92a6SSeth Howell 
56049ee92a6SSeth Howell 	spdk_for_each_channel(transport->tgt, nvmf_stop_listen_disconnect_qpairs, ctx,
56149ee92a6SSeth Howell 			      nvmf_stop_listen_fini);
56249ee92a6SSeth Howell 
56349ee92a6SSeth Howell 	return 0;
56449ee92a6SSeth Howell }
56549ee92a6SSeth Howell 
/* Fill a discovery log page entry describing the listener at trid by
 * delegating to the transport-specific listener_discover callback. */
void
nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
				 struct spdk_nvme_transport_id *trid,
				 struct spdk_nvmf_discovery_log_page_entry *entry)
{
	transport->ops->listener_discover(transport, trid, entry);
}
573a4e28342SBen Walker 
574*aac967c0SJim Harris static int
575*aac967c0SJim Harris nvmf_tgroup_poll(void *arg)
576*aac967c0SJim Harris {
577*aac967c0SJim Harris 	struct spdk_nvmf_transport_poll_group *tgroup = arg;
578*aac967c0SJim Harris 	int rc;
579*aac967c0SJim Harris 
580*aac967c0SJim Harris 	rc = nvmf_transport_poll_group_poll(tgroup);
581*aac967c0SJim Harris 	return rc == 0 ? SPDK_POLLER_IDLE : SPDK_POLLER_BUSY;
582*aac967c0SJim Harris }
583*aac967c0SJim Harris 
584*aac967c0SJim Harris static void
585*aac967c0SJim Harris nvmf_transport_poll_group_create_poller(struct spdk_nvmf_transport_poll_group *tgroup)
586*aac967c0SJim Harris {
587*aac967c0SJim Harris 	char poller_name[SPDK_NVMF_TRSTRING_MAX_LEN + 32];
588*aac967c0SJim Harris 
589*aac967c0SJim Harris 	snprintf(poller_name, sizeof(poller_name), "nvmf_%s", tgroup->transport->ops->name);
590*aac967c0SJim Harris 	tgroup->poller = spdk_poller_register_named(nvmf_tgroup_poll, tgroup, 0, poller_name);
591*aac967c0SJim Harris 	spdk_poller_register_interrupt(tgroup->poller, NULL, NULL);
592*aac967c0SJim Harris }
593*aac967c0SJim Harris 
594c1535ca0SBen Walker struct spdk_nvmf_transport_poll_group *
5955e373163SJohn Levon nvmf_transport_poll_group_create(struct spdk_nvmf_transport *transport,
5965e373163SJohn Levon 				 struct spdk_nvmf_poll_group *group)
597a4e28342SBen Walker {
5985e373163SJohn Levon 	struct spdk_nvmf_transport_poll_group *tgroup;
59973ba05f7SBen Walker 	struct spdk_iobuf_opts opts_iobuf = {};
60073ba05f7SBen Walker 	uint32_t buf_cache_size, small_cache_size, large_cache_size;
60173ba05f7SBen Walker 	int rc;
602a4e28342SBen Walker 
603b17919d8SJacek Kalwas 	pthread_mutex_lock(&transport->mutex);
6045e373163SJohn Levon 	tgroup = transport->ops->poll_group_create(transport, group);
605b17919d8SJacek Kalwas 	pthread_mutex_unlock(&transport->mutex);
6065e373163SJohn Levon 	if (!tgroup) {
607d722a174SEvgeniy Kochetov 		return NULL;
608d722a174SEvgeniy Kochetov 	}
6095e373163SJohn Levon 	tgroup->transport = transport;
610*aac967c0SJim Harris 	nvmf_transport_poll_group_create_poller(tgroup);
611a4e28342SBen Walker 
6125e373163SJohn Levon 	STAILQ_INIT(&tgroup->pending_buf_queue);
6138cb172f2SSeth Howell 
614a68e0930SKalwas, Jacek 	if (!nvmf_transport_use_iobuf(transport)) {
615a68e0930SKalwas, Jacek 		/* We aren't going to allocate any shared buffers or cache, so just return now. */
616280a3abcSJim Harris 		return tgroup;
617280a3abcSJim Harris 	}
618280a3abcSJim Harris 
61973ba05f7SBen Walker 	buf_cache_size = transport->opts.buf_cache_size;
62073ba05f7SBen Walker 
6213092c61dSJim Harris 	/* buf_cache_size of UINT32_MAX means the value should be calculated dynamically
6223092c61dSJim Harris 	 * based on the number of buffers in the shared pool and the number of poll groups
6233092c61dSJim Harris 	 * that are sharing them.  We allocate 75% of the pool for the cache, and then
6243092c61dSJim Harris 	 * divide that by number of poll groups to determine the buf_cache_size for this
6253092c61dSJim Harris 	 * poll group.
6263092c61dSJim Harris 	 */
62773ba05f7SBen Walker 	if (buf_cache_size == UINT32_MAX) {
6283092c61dSJim Harris 		uint32_t num_shared_buffers = transport->opts.num_shared_buffers;
62973ba05f7SBen Walker 
6303092c61dSJim Harris 		/* Theoretically the nvmf library can dynamically add poll groups to
6313092c61dSJim Harris 		 * the target, after transports have already been created.  We aren't
6323092c61dSJim Harris 		 * going to try to really handle this case efficiently, just do enough
6333092c61dSJim Harris 		 * here to ensure we don't divide-by-zero.
6343092c61dSJim Harris 		 */
6353092c61dSJim Harris 		uint16_t num_poll_groups = group->tgt->num_poll_groups ? : spdk_env_get_core_count();
6363092c61dSJim Harris 
63773ba05f7SBen Walker 		buf_cache_size = (num_shared_buffers * 3 / 4) / num_poll_groups;
6383092c61dSJim Harris 	}
6393092c61dSJim Harris 
640194983eeSJohn Levon 	spdk_iobuf_get_opts(&opts_iobuf, sizeof(opts_iobuf));
64173ba05f7SBen Walker 	small_cache_size = buf_cache_size;
64273ba05f7SBen Walker 	if (transport->opts.io_unit_size <= opts_iobuf.small_bufsize) {
64373ba05f7SBen Walker 		large_cache_size = 0;
64473ba05f7SBen Walker 	} else {
64573ba05f7SBen Walker 		large_cache_size = buf_cache_size;
6468cb172f2SSeth Howell 	}
647903fdb75SAlexey Marchuk 
648847e8915SJacek Kalwas 	tgroup->buf_cache = calloc(1, sizeof(*tgroup->buf_cache));
649847e8915SJacek Kalwas 	if (!tgroup->buf_cache) {
650847e8915SJacek Kalwas 		SPDK_ERRLOG("Unable to allocate an iobuf channel in the poll group.\n");
651847e8915SJacek Kalwas 		goto err;
652847e8915SJacek Kalwas 	}
653903fdb75SAlexey Marchuk 
654847e8915SJacek Kalwas 	rc = spdk_iobuf_channel_init(tgroup->buf_cache, transport->iobuf_name, small_cache_size,
655847e8915SJacek Kalwas 				     large_cache_size);
65673ba05f7SBen Walker 	if (rc != 0) {
65773ba05f7SBen Walker 		SPDK_ERRLOG("Unable to reserve the full number of buffers for the pg buffer cache.\n");
658847e8915SJacek Kalwas 		rc = spdk_iobuf_channel_init(tgroup->buf_cache, transport->iobuf_name, 0, 0);
65973ba05f7SBen Walker 		if (rc != 0) {
66073ba05f7SBen Walker 			SPDK_ERRLOG("Unable to create an iobuf channel in the poll group.\n");
661847e8915SJacek Kalwas 			goto err;
662903fdb75SAlexey Marchuk 		}
66373ba05f7SBen Walker 	}
664d11601e8SKrzysztof Karas 
6655e373163SJohn Levon 	return tgroup;
666847e8915SJacek Kalwas err:
667847e8915SJacek Kalwas 	transport->ops->poll_group_destroy(tgroup);
668847e8915SJacek Kalwas 	return NULL;
669a4e28342SBen Walker }
670a4e28342SBen Walker 
671960460f0SZiye Yang struct spdk_nvmf_transport_poll_group *
67261d85773SSeth Howell nvmf_transport_get_optimal_poll_group(struct spdk_nvmf_transport *transport,
673960460f0SZiye Yang 				      struct spdk_nvmf_qpair *qpair)
674960460f0SZiye Yang {
675b17919d8SJacek Kalwas 	struct spdk_nvmf_transport_poll_group *tgroup;
676b17919d8SJacek Kalwas 
677960460f0SZiye Yang 	if (transport->ops->get_optimal_poll_group) {
678b17919d8SJacek Kalwas 		pthread_mutex_lock(&transport->mutex);
679b17919d8SJacek Kalwas 		tgroup = transport->ops->get_optimal_poll_group(qpair);
680b17919d8SJacek Kalwas 		pthread_mutex_unlock(&transport->mutex);
681b17919d8SJacek Kalwas 
682b17919d8SJacek Kalwas 		return tgroup;
683960460f0SZiye Yang 	} else {
684960460f0SZiye Yang 		return NULL;
685960460f0SZiye Yang 	}
686960460f0SZiye Yang }
687960460f0SZiye Yang 
/* Tear down a transport poll group: stop its poller, let the transport free
 * the group (which also returns any buffers the group still holds), then
 * finish and free the iobuf channel.  Teardown order matters here — see the
 * inline comments. */
void
nvmf_transport_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_iobuf_channel *ch = NULL;

	transport = group->transport;

	/* Stop polling before the group is freed underneath the poller. */
	spdk_poller_unregister(&group->poller);

	if (!STAILQ_EMPTY(&group->pending_buf_queue)) {
		SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
	}

	if (nvmf_transport_use_iobuf(transport)) {
		/* The call to poll_group_destroy both frees the group memory, but also
		 * releases any remaining buffers. Cache channel pointer so we can still
		 * release the resources after the group has been freed. */
		ch = group->buf_cache;
	}

	/* poll_group_create/destroy callbacks are serialized on transport->mutex. */
	pthread_mutex_lock(&transport->mutex);
	transport->ops->poll_group_destroy(group);
	pthread_mutex_unlock(&transport->mutex);

	/* 'group' is gone now; only the cached channel pointer remains valid. */
	if (nvmf_transport_use_iobuf(transport)) {
		spdk_iobuf_channel_fini(ch);
		free(ch);
	}
}
718a4e28342SBen Walker 
/* Pause a poll group by unregistering its poller; the group itself is left
 * intact and can be re-armed with nvmf_transport_poll_group_resume(). */
void
nvmf_transport_poll_group_pause(struct spdk_nvmf_transport_poll_group *tgroup)
{
	spdk_poller_unregister(&tgroup->poller);
}
724*aac967c0SJim Harris 
/* Resume a paused poll group by re-creating its poller (counterpart of
 * nvmf_transport_poll_group_pause()). */
void
nvmf_transport_poll_group_resume(struct spdk_nvmf_transport_poll_group *tgroup)
{
	nvmf_transport_poll_group_create_poller(tgroup);
}
730*aac967c0SJim Harris 
731a4e28342SBen Walker int
73261d85773SSeth Howell nvmf_transport_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
733a4e28342SBen Walker 			      struct spdk_nvmf_qpair *qpair)
734a4e28342SBen Walker {
735a4e28342SBen Walker 	if (qpair->transport) {
736baa936a1SBen Walker 		assert(qpair->transport == group->transport);
737baa936a1SBen Walker 		if (qpair->transport != group->transport) {
738a4e28342SBen Walker 			return -1;
739a4e28342SBen Walker 		}
740a4e28342SBen Walker 	} else {
741baa936a1SBen Walker 		qpair->transport = group->transport;
742a4e28342SBen Walker 	}
743a4e28342SBen Walker 
744d11601e8SKrzysztof Karas 	SPDK_DTRACE_PROBE3(nvmf_transport_poll_group_add, qpair, qpair->qid,
745d11601e8SKrzysztof Karas 			   spdk_thread_get_id(group->group->thread));
746d11601e8SKrzysztof Karas 
747baa936a1SBen Walker 	return group->transport->ops->poll_group_add(group, qpair);
748a4e28342SBen Walker }
749a4e28342SBen Walker 
750a4e28342SBen Walker int
75161d85773SSeth Howell nvmf_transport_poll_group_remove(struct spdk_nvmf_transport_poll_group *group,
752527c825cSZiye Yang 				 struct spdk_nvmf_qpair *qpair)
753527c825cSZiye Yang {
754527c825cSZiye Yang 	int rc = ENOTSUP;
755527c825cSZiye Yang 
756d11601e8SKrzysztof Karas 	SPDK_DTRACE_PROBE3(nvmf_transport_poll_group_remove, qpair, qpair->qid,
757d11601e8SKrzysztof Karas 			   spdk_thread_get_id(group->group->thread));
758d11601e8SKrzysztof Karas 
759527c825cSZiye Yang 	assert(qpair->transport == group->transport);
760527c825cSZiye Yang 	if (group->transport->ops->poll_group_remove) {
761527c825cSZiye Yang 		rc = group->transport->ops->poll_group_remove(group, qpair);
762527c825cSZiye Yang 	}
763527c825cSZiye Yang 
764527c825cSZiye Yang 	return rc;
765527c825cSZiye Yang }
766527c825cSZiye Yang 
/* Run one poll iteration of the transport-specific poll group.  A return of 0
 * is treated as "no work done" by nvmf_tgroup_poll(). */
int
nvmf_transport_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
{
	return group->transport->ops->poll_group_poll(group);
}
772d5ce9cffSBen Walker 
/* Dispatch request teardown to the transport that owns the request's qpair. */
int
nvmf_transport_req_free(struct spdk_nvmf_request *req)
{
	return req->qpair->transport->ops->req_free(req);
}
778388e3101SSeth Howell 
/* Dispatch request completion to the transport that owns the request's qpair. */
int
nvmf_transport_req_complete(struct spdk_nvmf_request *req)
{
	return req->qpair->transport->ops->req_complete(req);
}
784a4e28342SBen Walker 
/* Destroy a qpair via its transport.  The signature suggests asynchronous
 * destruction: the transport is expected to invoke cb_fn(cb_arg) once the
 * qpair is fully torn down. */
void
nvmf_transport_qpair_fini(struct spdk_nvmf_qpair *qpair,
			  spdk_nvmf_transport_qpair_fini_cb cb_fn,
			  void *cb_arg)
{
	SPDK_DTRACE_PROBE1(nvmf_transport_qpair_fini, qpair);

	qpair->transport->ops->qpair_fini(qpair, cb_fn, cb_arg);
}
794a4e28342SBen Walker 
/* Fill 'trid' with the remote peer's transport ID for this qpair.
 * Returns the transport callback's result. */
int
nvmf_transport_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
				   struct spdk_nvme_transport_id *trid)
{
	return qpair->transport->ops->qpair_get_peer_trid(qpair, trid);
}
801311ce0e2SBen Walker 
/* Fill 'trid' with the local side's transport ID for this qpair.
 * Returns the transport callback's result. */
int
nvmf_transport_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
				    struct spdk_nvme_transport_id *trid)
{
	return qpair->transport->ops->qpair_get_local_trid(qpair, trid);
}
808f10a91edSBen Walker 
/* Fill 'trid' with the transport ID of the listener this qpair connected to.
 * Returns the transport callback's result. */
int
nvmf_transport_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
				     struct spdk_nvme_transport_id *trid)
{
	return qpair->transport->ops->qpair_get_listen_trid(qpair, trid);
}
815183d81d0SJohn Barnard 
816604b4503SShuhei Matsumoto void
817604b4503SShuhei Matsumoto nvmf_transport_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
818604b4503SShuhei Matsumoto 				   struct spdk_nvmf_request *req)
819604b4503SShuhei Matsumoto {
820ff5c19b1SChangpeng Liu 	if (qpair->transport->ops->qpair_abort_request) {
821604b4503SShuhei Matsumoto 		qpair->transport->ops->qpair_abort_request(qpair, req);
822604b4503SShuhei Matsumoto 	}
823ff5c19b1SChangpeng Liu }
824604b4503SShuhei Matsumoto 
825183d81d0SJohn Barnard bool
8265b3e6cd1SSeth Howell spdk_nvmf_transport_opts_init(const char *transport_name,
8273b16c6ddSZiye Yang 			      struct spdk_nvmf_transport_opts *opts, size_t opts_size)
828183d81d0SJohn Barnard {
829183d81d0SJohn Barnard 	const struct spdk_nvmf_transport_ops *ops;
8303b16c6ddSZiye Yang 	struct spdk_nvmf_transport_opts opts_local = {};
831183d81d0SJohn Barnard 
8326dec2087SSeth Howell 	ops = nvmf_get_transport_ops(transport_name);
833183d81d0SJohn Barnard 	if (!ops) {
8345b3e6cd1SSeth Howell 		SPDK_ERRLOG("Transport type %s unavailable.\n", transport_name);
835183d81d0SJohn Barnard 		return false;
836183d81d0SJohn Barnard 	}
837183d81d0SJohn Barnard 
8383b16c6ddSZiye Yang 	if (!opts) {
8393b16c6ddSZiye Yang 		SPDK_ERRLOG("opts should not be NULL\n");
8403b16c6ddSZiye Yang 		return false;
8413b16c6ddSZiye Yang 	}
8423b16c6ddSZiye Yang 
8433b16c6ddSZiye Yang 	if (!opts_size) {
8443b16c6ddSZiye Yang 		SPDK_ERRLOG("opts_size inside opts should not be zero value\n");
8453b16c6ddSZiye Yang 		return false;
8463b16c6ddSZiye Yang 	}
8473b16c6ddSZiye Yang 
8483b16c6ddSZiye Yang 	opts_local.association_timeout = NVMF_TRANSPORT_DEFAULT_ASSOCIATION_TIMEOUT_IN_MS;
84943022da3SJacek Kalwas 	opts_local.acceptor_poll_rate = SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US;
85065335336SKonrad Sztyber 	opts_local.disable_command_passthru = false;
8513b16c6ddSZiye Yang 	ops->opts_init(&opts_local);
8523b16c6ddSZiye Yang 
8533b16c6ddSZiye Yang 	nvmf_transport_opts_copy(opts, &opts_local, opts_size);
8543b16c6ddSZiye Yang 
855183d81d0SJohn Barnard 	return true;
856183d81d0SJohn Barnard }
857e956be96SZiye Yang 
/* Return every pooled data buffer of a request to the poll group's iobuf
 * cache and reset the request's iovec bookkeeping.  'transport' is unused
 * here; presumably kept for API symmetry with spdk_nvmf_request_get_buffers(). */
void
spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
			       struct spdk_nvmf_transport_poll_group *group,
			       struct spdk_nvmf_transport *transport)
{
	uint32_t i;

	for (i = 0; i < req->iovcnt; i++) {
		spdk_iobuf_put(group->buf_cache, req->iov[i].iov_base, req->iov[i].iov_len);
		req->iov[i].iov_base = NULL;
		req->iov[i].iov_len = 0;
	}
	req->iovcnt = 0;
	req->data_from_pool = false;
}
873cc4d1f82SShuhei Matsumoto 
8740db0c443SChunsong Feng static int
875063c79d1SShuhei Matsumoto nvmf_request_set_buffer(struct spdk_nvmf_request *req, void *buf, uint32_t length,
876063c79d1SShuhei Matsumoto 			uint32_t io_unit_size)
877063c79d1SShuhei Matsumoto {
87873ba05f7SBen Walker 	req->iov[req->iovcnt].iov_base = buf;
879063c79d1SShuhei Matsumoto 	req->iov[req->iovcnt].iov_len  = spdk_min(length, io_unit_size);
880063c79d1SShuhei Matsumoto 	length -= req->iov[req->iovcnt].iov_len;
881063c79d1SShuhei Matsumoto 	req->iovcnt++;
882063c79d1SShuhei Matsumoto 
883063c79d1SShuhei Matsumoto 	return length;
884063c79d1SShuhei Matsumoto }
885063c79d1SShuhei Matsumoto 
88604621576SShuhei Matsumoto static int
887a48eba16SKrzysztof Goreczny nvmf_request_set_stripped_buffer(struct spdk_nvmf_request *req, void *buf, uint32_t length,
888a48eba16SKrzysztof Goreczny 				 uint32_t io_unit_size)
889a48eba16SKrzysztof Goreczny {
890a48eba16SKrzysztof Goreczny 	struct spdk_nvmf_stripped_data *data = req->stripped_data;
891a48eba16SKrzysztof Goreczny 
892a48eba16SKrzysztof Goreczny 	data->iov[data->iovcnt].iov_base = buf;
893a48eba16SKrzysztof Goreczny 	data->iov[data->iovcnt].iov_len  = spdk_min(length, io_unit_size);
894a48eba16SKrzysztof Goreczny 	length -= data->iov[data->iovcnt].iov_len;
895a48eba16SKrzysztof Goreczny 	data->iovcnt++;
896a48eba16SKrzysztof Goreczny 
897a48eba16SKrzysztof Goreczny 	return length;
898a48eba16SKrzysztof Goreczny }
899a48eba16SKrzysztof Goreczny 
90018ede8d3SKrzysztof Goreczny static void nvmf_request_iobuf_get_cb(struct spdk_iobuf_entry *entry, void *buf);
90118ede8d3SKrzysztof Goreczny 
/* Acquire iobuf buffers to cover 'length' bytes, io_unit_size bytes at a
 * time, appending them to either the request's normal iovec or (when
 * stripped_buffers is set) its stripped-data iovec.
 * Returns 0 on success, -EINVAL when the I/O needs more than
 * NVMF_REQ_MAX_BUFFERS buffers, and -ENOMEM when the pool is exhausted. */
static int
nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			 struct spdk_nvmf_transport_poll_group *group,
			 struct spdk_nvmf_transport *transport,
			 uint32_t length, uint32_t io_unit_size,
			 bool stripped_buffers)
{
	struct spdk_iobuf_entry *entry = NULL;
	uint32_t num_buffers;
	uint32_t i = 0;
	void *buffer;

	/* If the number of buffers is too large, then we know the I/O is larger than allowed.
	 *  Fail it.
	 */
	num_buffers = SPDK_CEIL_DIV(length, io_unit_size);
	if (spdk_unlikely(num_buffers > NVMF_REQ_MAX_BUFFERS)) {
		return -EINVAL;
	}

	/* Use iobuf queuing only if transport supports it */
	if (transport->ops->req_get_buffers_done != NULL) {
		entry = &req->iobuf.entry;
	}

	while (i < num_buffers) {
		buffer = spdk_iobuf_get(group->buf_cache, spdk_min(io_unit_size, length), entry,
					nvmf_request_iobuf_get_cb);
		if (spdk_unlikely(buffer == NULL)) {
			/* Pool exhausted.  Record the unmapped remainder; when
			 * entry != NULL the request has been queued on the iobuf
			 * channel and nvmf_request_iobuf_get_cb() will continue
			 * the allocation from remaining_length later. */
			req->iobuf.remaining_length = length;
			return -ENOMEM;
		}
		if (stripped_buffers) {
			length = nvmf_request_set_stripped_buffer(req, buffer, length, io_unit_size);
		} else {
			length = nvmf_request_set_buffer(req, buffer, length, io_unit_size);
		}
		i++;
	}

	assert(length == 0);
	req->data_from_pool = true;

	return 0;
}
94704621576SShuhei Matsumoto 
/* iobuf callback: a buffer has become available for a request that was queued
 * in nvmf_request_get_buffers().  Consume it and try to acquire the rest. */
static void
nvmf_request_iobuf_get_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_nvmf_request *req = SPDK_CONTAINEROF(entry, struct spdk_nvmf_request, iobuf.entry);
	struct spdk_nvmf_transport *transport = req->qpair->transport;
	struct spdk_nvmf_poll_group *group = req->qpair->group;
	struct spdk_nvmf_transport_poll_group *tgroup = nvmf_get_transport_poll_group(group, transport);
	uint32_t length = req->iobuf.remaining_length;
	uint32_t io_unit_size = transport->opts.io_unit_size;
	int rc;

	assert(tgroup != NULL);

	/* Append the buffer we were just handed, then continue acquiring the
	 * remainder.  On another -ENOMEM the request is re-queued by
	 * nvmf_request_get_buffers() and this callback fires again. */
	length = nvmf_request_set_buffer(req, buf, length, io_unit_size);
	rc = nvmf_request_get_buffers(req, tgroup, transport, length, io_unit_size, false);
	if (rc == 0) {
		/* All buffers acquired; tell the transport the allocation finished. */
		transport->ops->req_get_buffers_done(req);
	}
}
96718ede8d3SKrzysztof Goreczny 
/* Public entry point: map 'length' bytes of request data onto iobuf buffers
 * in req->iov.  Returns 0 on success, -EINVAL for oversized I/O, -ENOMEM when
 * buffers are exhausted.  If the transport implements req_get_buffers_done,
 * an -ENOMEM request remains queued on the iobuf channel and completes later
 * via nvmf_request_iobuf_get_cb(); otherwise partial buffers are released. */
int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	int rc;

	assert(nvmf_transport_use_iobuf(transport));

	req->iovcnt = 0;
	rc = nvmf_request_get_buffers(req, group, transport, length, transport->opts.io_unit_size, false);
	if (spdk_unlikely(rc == -ENOMEM && transport->ops->req_get_buffers_done == NULL)) {
		/* No queuing support: give back whatever was acquired before exhaustion. */
		spdk_nvmf_request_free_buffers(req, group, transport);
	}

	return rc;
}
9860db0c443SChunsong Feng 
98718ede8d3SKrzysztof Goreczny static int
98818ede8d3SKrzysztof Goreczny nvmf_request_get_buffers_abort_cb(struct spdk_iobuf_channel *ch, struct spdk_iobuf_entry *entry,
98918ede8d3SKrzysztof Goreczny 				  void *cb_ctx)
99018ede8d3SKrzysztof Goreczny {
99118ede8d3SKrzysztof Goreczny 	struct spdk_nvmf_request *req, *req_to_abort = cb_ctx;
99218ede8d3SKrzysztof Goreczny 
99318ede8d3SKrzysztof Goreczny 	req = SPDK_CONTAINEROF(entry, struct spdk_nvmf_request, iobuf.entry);
99418ede8d3SKrzysztof Goreczny 	if (req != req_to_abort) {
99518ede8d3SKrzysztof Goreczny 		return 0;
99618ede8d3SKrzysztof Goreczny 	}
99718ede8d3SKrzysztof Goreczny 
99818ede8d3SKrzysztof Goreczny 	spdk_iobuf_entry_abort(ch, entry, spdk_min(req->iobuf.remaining_length,
99918ede8d3SKrzysztof Goreczny 			       req->qpair->transport->opts.io_unit_size));
100018ede8d3SKrzysztof Goreczny 	return 1;
100118ede8d3SKrzysztof Goreczny }
100218ede8d3SKrzysztof Goreczny 
100318ede8d3SKrzysztof Goreczny bool
100418ede8d3SKrzysztof Goreczny nvmf_request_get_buffers_abort(struct spdk_nvmf_request *req)
100518ede8d3SKrzysztof Goreczny {
100618ede8d3SKrzysztof Goreczny 	struct spdk_nvmf_transport_poll_group *tgroup = nvmf_get_transport_poll_group(req->qpair->group,
100718ede8d3SKrzysztof Goreczny 			req->qpair->transport);
100818ede8d3SKrzysztof Goreczny 	int rc;
100918ede8d3SKrzysztof Goreczny 
101018ede8d3SKrzysztof Goreczny 	assert(tgroup != NULL);
101118ede8d3SKrzysztof Goreczny 
1012b82fd48aSJim Harris 	rc = spdk_iobuf_for_each_entry(tgroup->buf_cache, nvmf_request_get_buffers_abort_cb, req);
101318ede8d3SKrzysztof Goreczny 	return rc == 1;
101418ede8d3SKrzysztof Goreczny }
101518ede8d3SKrzysztof Goreczny 
/* Release the buffers backing a request's metadata-stripped copy and free the
 * stripped-data tracking structure.  'transport' is unused here; presumably
 * kept for symmetry with spdk_nvmf_request_free_buffers(). */
void
nvmf_request_free_stripped_buffers(struct spdk_nvmf_request *req,
				   struct spdk_nvmf_transport_poll_group *group,
				   struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_stripped_data *data = req->stripped_data;
	uint32_t i;

	for (i = 0; i < data->iovcnt; i++) {
		spdk_iobuf_put(group->buf_cache, data->iov[i].iov_base, data->iov[i].iov_len);
	}
	free(data);
	req->stripped_data = NULL;
}
10300db0c443SChunsong Feng 
10310db0c443SChunsong Feng int
10320db0c443SChunsong Feng nvmf_request_get_stripped_buffers(struct spdk_nvmf_request *req,
10330db0c443SChunsong Feng 				  struct spdk_nvmf_transport_poll_group *group,
10340db0c443SChunsong Feng 				  struct spdk_nvmf_transport *transport,
10350db0c443SChunsong Feng 				  uint32_t length)
10360db0c443SChunsong Feng {
10370db0c443SChunsong Feng 	uint32_t block_size = req->dif.dif_ctx.block_size;
10380db0c443SChunsong Feng 	uint32_t data_block_size = block_size - req->dif.dif_ctx.md_size;
10390db0c443SChunsong Feng 	uint32_t io_unit_size = transport->opts.io_unit_size / block_size * data_block_size;
10400db0c443SChunsong Feng 	struct spdk_nvmf_stripped_data *data;
10410db0c443SChunsong Feng 	uint32_t i;
10420db0c443SChunsong Feng 	int rc;
10430db0c443SChunsong Feng 
104418ede8d3SKrzysztof Goreczny 	/* We don't support iobuf queueing with stripped buffers yet */
104518ede8d3SKrzysztof Goreczny 	assert(transport->ops->req_get_buffers_done == NULL);
104618ede8d3SKrzysztof Goreczny 
10470db0c443SChunsong Feng 	/* Data blocks must be block aligned */
10480db0c443SChunsong Feng 	for (i = 0; i < req->iovcnt; i++) {
10490db0c443SChunsong Feng 		if (req->iov[i].iov_len % block_size) {
10500db0c443SChunsong Feng 			return -EINVAL;
10510db0c443SChunsong Feng 		}
10520db0c443SChunsong Feng 	}
10530db0c443SChunsong Feng 
10540db0c443SChunsong Feng 	data = calloc(1, sizeof(*data));
10550db0c443SChunsong Feng 	if (data == NULL) {
10560db0c443SChunsong Feng 		SPDK_ERRLOG("Unable to allocate memory for stripped_data.\n");
10570db0c443SChunsong Feng 		return -ENOMEM;
10580db0c443SChunsong Feng 	}
10590db0c443SChunsong Feng 	req->stripped_data = data;
10600db0c443SChunsong Feng 	req->stripped_data->iovcnt = 0;
10610db0c443SChunsong Feng 
106218ede8d3SKrzysztof Goreczny 	rc = nvmf_request_get_buffers(req, group, transport, length, io_unit_size, true);
10630db0c443SChunsong Feng 	if (rc == -ENOMEM) {
10640db0c443SChunsong Feng 		nvmf_request_free_stripped_buffers(req, group, transport);
10650db0c443SChunsong Feng 		return rc;
10660db0c443SChunsong Feng 	}
10670db0c443SChunsong Feng 	return rc;
10680db0c443SChunsong Feng }
1069