xref: /spdk/lib/nvmf/transport.c (revision 18c8b52afa69f39481ebb75711b2f30b11693f9d)
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2018-2019, 2021 Mellanox Technologies LTD. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"
#include "transport.h"

#include "spdk/config.h"
#include "spdk/log.h"
#include "spdk/nvmf.h"
#include "spdk/nvmf_transport.h"
#include "spdk/queue.h"
#include "spdk/util.h"
#include "spdk_internal/usdt.h"

#define MAX_MEMPOOL_NAME_LENGTH 40
#define NVMF_TRANSPORT_DEFAULT_ASSOCIATION_TIMEOUT_IN_MS 120000

struct nvmf_transport_ops_list_element {
	struct spdk_nvmf_transport_ops			ops;
	TAILQ_ENTRY(nvmf_transport_ops_list_element)	link;
};

TAILQ_HEAD(nvmf_transport_ops_list, nvmf_transport_ops_list_element)
g_spdk_nvmf_transport_ops = TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_transport_ops);

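/* Look up a registered transport by name. The comparison is case-insensitive
 * (strcasecmp), so "TCP", "tcp" and "Tcp" all resolve to the same ops
 * structure. Returns NULL if no transport with that name has been registered. */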
static inline const struct spdk_nvmf_transport_ops *
nvmf_get_transport_ops(const char *transport_name)
{
	struct nvmf_transport_ops_list_element *ops;
	TAILQ_FOREACH(ops, &g_spdk_nvmf_transport_ops, link) {
		if (strcasecmp(transport_name, ops->ops.name) == 0) {
			return &ops->ops;
		}
	}
	return NULL;
}

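/* Register a transport's function table with the nvmf library. The ops
 * structure is copied, so the caller's storage does not need to outlive this
 * call. Transport implementations typically invoke this at load time, e.g.
 * via the SPDK_NVMF_TRANSPORT_REGISTER() constructor macro declared in
 * spdk/nvmf_transport.h. Registering the same name twice is a programming
 * error; it is ignored at runtime and asserts in debug builds. */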
void
spdk_nvmf_transport_register(const struct spdk_nvmf_transport_ops *ops)
{
	struct nvmf_transport_ops_list_element *new_ops;

	if (nvmf_get_transport_ops(ops->name) != NULL) {
		SPDK_ERRLOG("Double registering nvmf transport type %s.\n", ops->name);
		assert(false);
		return;
	}

	new_ops = calloc(1, sizeof(*new_ops));
	if (new_ops == NULL) {
		SPDK_ERRLOG("Unable to allocate memory to register new transport type %s.\n", ops->name);
		assert(false);
		return;
	}

	new_ops->ops = *ops;

	TAILQ_INSERT_TAIL(&g_spdk_nvmf_transport_ops, new_ops, link);
}

const struct spdk_nvmf_transport_opts *
spdk_nvmf_get_transport_opts(struct spdk_nvmf_transport *transport)
{
	return &transport->opts;
}

void
nvmf_transport_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w,
			 bool named)
{
	const struct spdk_nvmf_transport_opts *opts = spdk_nvmf_get_transport_opts(transport);

	named ? spdk_json_write_named_object_begin(w, "params") : spdk_json_write_object_begin(w);

	spdk_json_write_named_string(w, "trtype", spdk_nvmf_get_transport_name(transport));
	spdk_json_write_named_uint32(w, "max_queue_depth", opts->max_queue_depth);
	spdk_json_write_named_uint32(w, "max_io_qpairs_per_ctrlr", opts->max_qpairs_per_ctrlr - 1);
	spdk_json_write_named_uint32(w, "in_capsule_data_size", opts->in_capsule_data_size);
	spdk_json_write_named_uint32(w, "max_io_size", opts->max_io_size);
	spdk_json_write_named_uint32(w, "io_unit_size", opts->io_unit_size);
	spdk_json_write_named_uint32(w, "max_aq_depth", opts->max_aq_depth);
	spdk_json_write_named_uint32(w, "num_shared_buffers", opts->num_shared_buffers);
	spdk_json_write_named_uint32(w, "buf_cache_size", opts->buf_cache_size);
	spdk_json_write_named_bool(w, "dif_insert_or_strip", opts->dif_insert_or_strip);
	spdk_json_write_named_bool(w, "zcopy", opts->zcopy);

	if (transport->ops->dump_opts) {
		transport->ops->dump_opts(transport, w);
	}

	spdk_json_write_named_uint32(w, "abort_timeout_sec", opts->abort_timeout_sec);
	spdk_json_write_object_end(w);
}

void
nvmf_transport_listen_dump_opts(struct spdk_nvmf_transport *transport,
				const struct spdk_nvme_transport_id *trid, struct spdk_json_write_ctx *w)
{
	const char *adrfam = spdk_nvme_transport_id_adrfam_str(trid->adrfam);

	spdk_json_write_named_object_begin(w, "listen_address");

	spdk_json_write_named_string(w, "trtype", trid->trstring);
	spdk_json_write_named_string(w, "adrfam", adrfam ? adrfam : "unknown");
	spdk_json_write_named_string(w, "traddr", trid->traddr);
	spdk_json_write_named_string(w, "trsvcid", trid->trsvcid);

	if (transport->ops->listen_dump_opts) {
		transport->ops->listen_dump_opts(transport, trid, w);
	}

	spdk_json_write_object_end(w);
}

spdk_nvme_transport_type_t
spdk_nvmf_get_transport_type(struct spdk_nvmf_transport *transport)
{
	return transport->ops->type;
}

const char *
spdk_nvmf_get_transport_name(struct spdk_nvmf_transport *transport)
{
	return transport->ops->name;
}

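/* Copy only the fields of opts_src that fit within opts_size. This keeps the
 * library compatible with callers compiled against an older, smaller
 * spdk_nvmf_transport_opts: each field is copied only if its offset plus size
 * lies within the caller-provided structure size. */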
static void
nvmf_transport_opts_copy(struct spdk_nvmf_transport_opts *opts,
			 struct spdk_nvmf_transport_opts *opts_src,
			 size_t opts_size)
{
	assert(opts);
	assert(opts_src);

	opts->opts_size = opts_size;

#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvmf_transport_opts, field) + sizeof(opts->field) <= opts_size) { \
		opts->field = opts_src->field; \
	} \

	SET_FIELD(max_queue_depth);
	SET_FIELD(max_qpairs_per_ctrlr);
	SET_FIELD(in_capsule_data_size);
	SET_FIELD(max_io_size);
	SET_FIELD(io_unit_size);
	SET_FIELD(max_aq_depth);
	SET_FIELD(buf_cache_size);
	SET_FIELD(num_shared_buffers);
	SET_FIELD(dif_insert_or_strip);
	SET_FIELD(abort_timeout_sec);
	SET_FIELD(association_timeout);
	SET_FIELD(transport_specific);
	SET_FIELD(acceptor_poll_rate);
	SET_FIELD(zcopy);

	/* Do not remove this statement. When adding a new field, update the
	 * expected size here and add a matching SET_FIELD statement above. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvmf_transport_opts) == 64, "Incorrect size");

#undef SET_FIELD
}

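/* Create a transport instance. The caller's opts are validated, copied into a
 * library-local structure (so callers built against a smaller opts struct are
 * handled safely), and passed to the transport's create() callback. On
 * success the transport also owns a shared data buffer pool sized by
 * num_shared_buffers; poll groups later draw their per-group caches from it. */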
struct spdk_nvmf_transport *
spdk_nvmf_transport_create(const char *transport_name, struct spdk_nvmf_transport_opts *opts)
{
	const struct spdk_nvmf_transport_ops *ops = NULL;
	struct spdk_nvmf_transport *transport;
	char spdk_mempool_name[MAX_MEMPOOL_NAME_LENGTH];
	int chars_written;
	struct spdk_nvmf_transport_opts opts_local = {};

	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return NULL;
	}

	if (!opts->opts_size) {
		SPDK_ERRLOG("The opts_size in opts structure should not be zero\n");
		return NULL;
	}

	ops = nvmf_get_transport_ops(transport_name);
	if (!ops) {
		SPDK_ERRLOG("Transport type '%s' unavailable.\n", transport_name);
		return NULL;
	}
	nvmf_transport_opts_copy(&opts_local, opts, opts->opts_size);

	if (opts_local.max_io_size != 0 && (!spdk_u32_is_pow2(opts_local.max_io_size) ||
					    opts_local.max_io_size < 8192)) {
		SPDK_ERRLOG("max_io_size %u must be a power of 2 and greater than or equal to 8KB\n",
			    opts_local.max_io_size);
		return NULL;
	}

	if (opts_local.max_aq_depth < SPDK_NVMF_MIN_ADMIN_MAX_SQ_SIZE) {
		SPDK_ERRLOG("max_aq_depth %u is less than the minimum defined by the NVMe-oF spec, using the min value\n",
			    opts_local.max_aq_depth);
		opts_local.max_aq_depth = SPDK_NVMF_MIN_ADMIN_MAX_SQ_SIZE;
	}

	transport = ops->create(&opts_local);
	if (!transport) {
		SPDK_ERRLOG("Unable to create new transport of type %s\n", transport_name);
		return NULL;
	}

	pthread_mutex_init(&transport->mutex, NULL);
	TAILQ_INIT(&transport->listeners);

	transport->ops = ops;
	transport->opts = opts_local;

	chars_written = snprintf(spdk_mempool_name, MAX_MEMPOOL_NAME_LENGTH, "%s_%s_%s", "spdk_nvmf",
				 transport_name, "data");
	if (chars_written < 0) {
		SPDK_ERRLOG("Unable to generate transport data buffer pool name.\n");
		ops->destroy(transport, NULL, NULL);
		return NULL;
	}

	if (opts_local.num_shared_buffers) {
		transport->data_buf_pool = spdk_mempool_create(spdk_mempool_name,
					   opts_local.num_shared_buffers,
					   opts_local.io_unit_size + NVMF_DATA_BUFFER_ALIGNMENT,
					   SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
					   SPDK_ENV_SOCKET_ID_ANY);

		if (!transport->data_buf_pool) {
			if (spdk_mempool_lookup(spdk_mempool_name) != NULL) {
				SPDK_ERRLOG("Unable to allocate poll group buffer pool: already exists\n");
				SPDK_ERRLOG("Probably running in a multiprocess environment, which is "
					    "unsupported by the nvmf library\n");
			} else {
				SPDK_ERRLOG("Unable to allocate buffer pool for poll group\n");
			}
			ops->destroy(transport, NULL, NULL);
			return NULL;
		}
	}

	return transport;
}

struct spdk_nvmf_transport *
spdk_nvmf_transport_get_first(struct spdk_nvmf_tgt *tgt)
{
	return TAILQ_FIRST(&tgt->transports);
}

struct spdk_nvmf_transport *
spdk_nvmf_transport_get_next(struct spdk_nvmf_transport *transport)
{
	return TAILQ_NEXT(transport, link);
}

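/* Tear down a transport: free the shared buffer pool (logging an error if any
 * buffers are still outstanding), stop and free all listeners, and then hand
 * off to the transport's destroy() callback, which may complete
 * asynchronously via cb_fn. */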
int
spdk_nvmf_transport_destroy(struct spdk_nvmf_transport *transport,
			    spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
{
	struct spdk_nvmf_listener *listener, *listener_tmp;

	if (transport->data_buf_pool != NULL) {
		if (spdk_mempool_count(transport->data_buf_pool) !=
		    transport->opts.num_shared_buffers) {
			SPDK_ERRLOG("transport buffer pool count is %zu but should be %u\n",
				    spdk_mempool_count(transport->data_buf_pool),
				    transport->opts.num_shared_buffers);
		}
		spdk_mempool_free(transport->data_buf_pool);
	}

	TAILQ_FOREACH_SAFE(listener, &transport->listeners, link, listener_tmp) {
		TAILQ_REMOVE(&transport->listeners, listener, link);
		transport->ops->stop_listen(transport, &listener->trid);
		free(listener);
	}

	pthread_mutex_destroy(&transport->mutex);
	return transport->ops->destroy(transport, cb_fn, cb_arg);
}

struct spdk_nvmf_listener *
nvmf_transport_find_listener(struct spdk_nvmf_transport *transport,
			     const struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_listener *listener;

	TAILQ_FOREACH(listener, &transport->listeners, link) {
		if (spdk_nvme_transport_id_compare(&listener->trid, trid) == 0) {
			return listener;
		}
	}

	return NULL;
}

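/* Listeners are reference counted: the first listen on a TRID actually opens
 * the listener through the transport's listen() callback, while subsequent
 * calls for the same TRID just bump the refcount. stop_listen decrements the
 * count and only closes the listener when it reaches zero. */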
int
spdk_nvmf_transport_listen(struct spdk_nvmf_transport *transport,
			   const struct spdk_nvme_transport_id *trid, struct spdk_nvmf_listen_opts *opts)
{
	struct spdk_nvmf_listener *listener;
	int rc;

	listener = nvmf_transport_find_listener(transport, trid);
	if (!listener) {
		listener = calloc(1, sizeof(*listener));
		if (!listener) {
			return -ENOMEM;
		}

		listener->ref = 1;
		listener->trid = *trid;
		TAILQ_INSERT_TAIL(&transport->listeners, listener, link);
		pthread_mutex_lock(&transport->mutex);
		rc = transport->ops->listen(transport, &listener->trid, opts);
		pthread_mutex_unlock(&transport->mutex);
		if (rc != 0) {
			TAILQ_REMOVE(&transport->listeners, listener, link);
			free(listener);
		}
		return rc;
	}

	++listener->ref;

	return 0;
}

int
spdk_nvmf_transport_stop_listen(struct spdk_nvmf_transport *transport,
				const struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_listener *listener;

	listener = nvmf_transport_find_listener(transport, trid);
	if (!listener) {
		return -ENOENT;
	}

	if (--listener->ref == 0) {
		TAILQ_REMOVE(&transport->listeners, listener, link);
		pthread_mutex_lock(&transport->mutex);
		transport->ops->stop_listen(transport, trid);
		pthread_mutex_unlock(&transport->mutex);
		free(listener);
	}

	return 0;
}

struct nvmf_stop_listen_ctx {
	struct spdk_nvmf_transport *transport;
	struct spdk_nvme_transport_id trid;
	struct spdk_nvmf_subsystem *subsystem;
	spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;
	void *cb_arg;
};

static void
nvmf_stop_listen_fini(struct spdk_io_channel_iter *i, int status)
{
	struct nvmf_stop_listen_ctx *ctx;
	struct spdk_nvmf_transport *transport;
	int rc = status;

	ctx = spdk_io_channel_iter_get_ctx(i);
	transport = ctx->transport;
	assert(transport != NULL);

	rc = spdk_nvmf_transport_stop_listen(transport, &ctx->trid);
	if (rc) {
		SPDK_ERRLOG("Failed to stop listening on address '%s'\n", ctx->trid.traddr);
	}

	if (ctx->cb_fn) {
		ctx->cb_fn(ctx->cb_arg, rc);
	}
	free(ctx);
}

static void
nvmf_stop_listen_disconnect_qpairs(struct spdk_io_channel_iter *i)
{
	struct nvmf_stop_listen_ctx *ctx;
	struct spdk_nvmf_poll_group *group;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_qpair *qpair, *tmp_qpair;
	struct spdk_nvme_transport_id tmp_trid;

	ctx = spdk_io_channel_iter_get_ctx(i);
	ch = spdk_io_channel_iter_get_channel(i);
	group = spdk_io_channel_get_ctx(ch);

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, tmp_qpair) {
		/* Skip qpairs whose listen TRID can't be retrieved; disconnect
		 * those that match the TRID being stopped. */
		if (spdk_nvmf_qpair_get_listen_trid(qpair, &tmp_trid)) {
			continue;
		}

		if (!spdk_nvme_transport_id_compare(&ctx->trid, &tmp_trid)) {
			if (ctx->subsystem == NULL || qpair->ctrlr == NULL ||
			    ctx->subsystem == qpair->ctrlr->subsys) {
				spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
			}
		}
	}
	spdk_for_each_channel_continue(i, 0);
}

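/* Asynchronous variant of stop_listen: first walk every poll group (one
 * spdk_io_channel per group) and disconnect qpairs accepted on the given
 * TRID, optionally restricted to one subsystem, then drop the listener
 * itself in the completion callback and report the result via cb_fn. */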
int
spdk_nvmf_transport_stop_listen_async(struct spdk_nvmf_transport *transport,
				      const struct spdk_nvme_transport_id *trid,
				      struct spdk_nvmf_subsystem *subsystem,
				      spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
				      void *cb_arg)
{
	struct nvmf_stop_listen_ctx *ctx;

	ctx = calloc(1, sizeof(struct nvmf_stop_listen_ctx));
	if (ctx == NULL) {
		return -ENOMEM;
	}

	ctx->trid = *trid;
	ctx->subsystem = subsystem;
	ctx->transport = transport;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(transport->tgt, nvmf_stop_listen_disconnect_qpairs, ctx,
			      nvmf_stop_listen_fini);

	return 0;
}

void
nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
				 struct spdk_nvme_transport_id *trid,
				 struct spdk_nvmf_discovery_log_page_entry *entry)
{
	transport->ops->listener_discover(transport, trid, entry);
}

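/* Create a transport poll group and pre-populate its per-group buffer cache
 * from the shared data buffer pool. If the pool cannot supply the full
 * buf_cache_size, the cache is shrunk (possibly to zero) rather than failing
 * group creation. */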
struct spdk_nvmf_transport_poll_group *
nvmf_transport_poll_group_create(struct spdk_nvmf_transport *transport,
				 struct spdk_nvmf_poll_group *group)
{
	struct spdk_nvmf_transport_poll_group *tgroup;
	struct spdk_nvmf_transport_pg_cache_buf **bufs;
	uint32_t i;

	pthread_mutex_lock(&transport->mutex);
	tgroup = transport->ops->poll_group_create(transport, group);
	pthread_mutex_unlock(&transport->mutex);
	if (!tgroup) {
		return NULL;
	}
	tgroup->transport = transport;

	STAILQ_INIT(&tgroup->pending_buf_queue);
	STAILQ_INIT(&tgroup->buf_cache);

	if (transport->opts.buf_cache_size) {
		tgroup->buf_cache_size = transport->opts.buf_cache_size;
		bufs = calloc(tgroup->buf_cache_size, sizeof(struct spdk_nvmf_transport_pg_cache_buf *));

		if (!bufs) {
			SPDK_ERRLOG("Memory allocation failed, can't reserve buffers for the pg buffer cache\n");
			return tgroup;
		}

		if (spdk_mempool_get_bulk(transport->data_buf_pool, (void **)bufs, tgroup->buf_cache_size)) {
			tgroup->buf_cache_size = (uint32_t)spdk_mempool_count(transport->data_buf_pool);
			SPDK_NOTICELOG("Unable to reserve the full number of buffers for the pg buffer cache. "
				       "Decreasing the number of cached buffers from %u to %u\n",
				       transport->opts.buf_cache_size, tgroup->buf_cache_size);
			/* Sanity check */
			assert(tgroup->buf_cache_size <= transport->opts.buf_cache_size);
			/* Try again with a smaller number of buffers */
			if (spdk_mempool_get_bulk(transport->data_buf_pool, (void **)bufs, tgroup->buf_cache_size)) {
				SPDK_NOTICELOG("Failed to reserve %u buffers\n", tgroup->buf_cache_size);
				tgroup->buf_cache_size = 0;
			}
		}

		for (i = 0; i < tgroup->buf_cache_size; i++) {
			STAILQ_INSERT_HEAD(&tgroup->buf_cache, bufs[i], link);
		}
		tgroup->buf_cache_count = tgroup->buf_cache_size;

		free(bufs);
	}

	return tgroup;
}

struct spdk_nvmf_transport_poll_group *
nvmf_transport_get_optimal_poll_group(struct spdk_nvmf_transport *transport,
				      struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_transport_poll_group *tgroup;

	if (transport->ops->get_optimal_poll_group) {
		pthread_mutex_lock(&transport->mutex);
		tgroup = transport->ops->get_optimal_poll_group(qpair);
		pthread_mutex_unlock(&transport->mutex);

		return tgroup;
	} else {
		return NULL;
	}
}

void
nvmf_transport_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
{
	struct spdk_nvmf_transport_pg_cache_buf *buf, *tmp;
	struct spdk_nvmf_transport *transport;

	transport = group->transport;

	if (!STAILQ_EMPTY(&group->pending_buf_queue)) {
		SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
	}

	STAILQ_FOREACH_SAFE(buf, &group->buf_cache, link, tmp) {
		STAILQ_REMOVE(&group->buf_cache, buf, spdk_nvmf_transport_pg_cache_buf, link);
		spdk_mempool_put(transport->data_buf_pool, buf);
	}

	pthread_mutex_lock(&transport->mutex);
	transport->ops->poll_group_destroy(group);
	pthread_mutex_unlock(&transport->mutex);
}

int
nvmf_transport_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_qpair *qpair)
{
	if (qpair->transport) {
		assert(qpair->transport == group->transport);
		if (qpair->transport != group->transport) {
			return -1;
		}
	} else {
		qpair->transport = group->transport;
	}

	SPDK_DTRACE_PROBE3(nvmf_transport_poll_group_add, qpair, qpair->qid,
			   spdk_thread_get_id(group->group->thread));

	return group->transport->ops->poll_group_add(group, qpair);
}

int
nvmf_transport_poll_group_remove(struct spdk_nvmf_transport_poll_group *group,
				 struct spdk_nvmf_qpair *qpair)
{
	/* Negative errno, matching the convention used by the rest of this file. */
	int rc = -ENOTSUP;

	SPDK_DTRACE_PROBE3(nvmf_transport_poll_group_remove, qpair, qpair->qid,
			   spdk_thread_get_id(group->group->thread));

	assert(qpair->transport == group->transport);
	if (group->transport->ops->poll_group_remove) {
		rc = group->transport->ops->poll_group_remove(group, qpair);
	}

	return rc;
}

int
nvmf_transport_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
{
	return group->transport->ops->poll_group_poll(group);
}

int
nvmf_transport_req_free(struct spdk_nvmf_request *req)
{
	return req->qpair->transport->ops->req_free(req);
}

int
nvmf_transport_req_complete(struct spdk_nvmf_request *req)
{
	return req->qpair->transport->ops->req_complete(req);
}

void
nvmf_transport_qpair_fini(struct spdk_nvmf_qpair *qpair,
			  spdk_nvmf_transport_qpair_fini_cb cb_fn,
			  void *cb_arg)
{
	SPDK_DTRACE_PROBE1(nvmf_transport_qpair_fini, qpair);

	qpair->transport->ops->qpair_fini(qpair, cb_fn, cb_arg);
}

int
nvmf_transport_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
				   struct spdk_nvme_transport_id *trid)
{
	return qpair->transport->ops->qpair_get_peer_trid(qpair, trid);
}

int
nvmf_transport_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
				    struct spdk_nvme_transport_id *trid)
{
	return qpair->transport->ops->qpair_get_local_trid(qpair, trid);
}

int
nvmf_transport_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
				     struct spdk_nvme_transport_id *trid)
{
	return qpair->transport->ops->qpair_get_listen_trid(qpair, trid);
}

void
nvmf_transport_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
				   struct spdk_nvmf_request *req)
{
	if (qpair->transport->ops->qpair_abort_request) {
		qpair->transport->ops->qpair_abort_request(qpair, req);
	}
}

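/* Initialize opts with the defaults of the named transport. The caller passes
 * opts_size as sizeof(*opts) so the library copies only the fields the
 * caller's struct actually contains. A minimal sketch of the expected call
 * pattern (the "tcp" name is illustrative; any registered transport works):
 *
 *	struct spdk_nvmf_transport_opts opts;
 *	if (spdk_nvmf_transport_opts_init("tcp", &opts, sizeof(opts))) {
 *		opts.max_queue_depth = 64;
 *		transport = spdk_nvmf_transport_create("tcp", &opts);
 *	}
 */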
bool
spdk_nvmf_transport_opts_init(const char *transport_name,
			      struct spdk_nvmf_transport_opts *opts, size_t opts_size)
{
	const struct spdk_nvmf_transport_ops *ops;
	struct spdk_nvmf_transport_opts opts_local = {};

	ops = nvmf_get_transport_ops(transport_name);
	if (!ops) {
		SPDK_ERRLOG("Transport type %s unavailable.\n", transport_name);
		return false;
	}

	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return false;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero\n");
		return false;
	}

	opts_local.association_timeout = NVMF_TRANSPORT_DEFAULT_ASSOCIATION_TIMEOUT_IN_MS;
	opts_local.acceptor_poll_rate = SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US;
	ops->opts_init(&opts_local);

	nvmf_transport_opts_copy(opts, &opts_local, opts_size);

	return true;
}

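/* Return a request's data buffers, preferring the poll group's local cache
 * and falling back to the shared mempool once the cache is full. */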
void
spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
			       struct spdk_nvmf_transport_poll_group *group,
			       struct spdk_nvmf_transport *transport)
{
	uint32_t i;

	for (i = 0; i < req->iovcnt; i++) {
		if (group->buf_cache_count < group->buf_cache_size) {
			STAILQ_INSERT_HEAD(&group->buf_cache,
					   (struct spdk_nvmf_transport_pg_cache_buf *)req->buffers[i],
					   link);
			group->buf_cache_count++;
		} else {
			spdk_mempool_put(transport->data_buf_pool, req->buffers[i]);
		}
		req->iov[i].iov_base = NULL;
		req->buffers[i] = NULL;
		req->iov[i].iov_len = 0;
	}
	req->data_from_pool = false;
}

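/* Pool buffers are allocated NVMF_DATA_BUFFER_ALIGNMENT bytes larger than
 * io_unit_size (see spdk_nvmf_transport_create), so each iov_base can be
 * rounded up to the next aligned address via (buf + mask) & ~mask. Each
 * set-buffer callback consumes up to io_unit_size bytes of the remaining
 * length and returns how much is still left to map. */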
typedef int (*set_buffer_callback)(struct spdk_nvmf_request *req, void *buf,
				   uint32_t length, uint32_t io_unit_size);

static int
nvmf_request_set_buffer(struct spdk_nvmf_request *req, void *buf, uint32_t length,
			uint32_t io_unit_size)
{
	req->buffers[req->iovcnt] = buf;
	req->iov[req->iovcnt].iov_base = (void *)((uintptr_t)(buf + NVMF_DATA_BUFFER_MASK) &
					 ~NVMF_DATA_BUFFER_MASK);
	req->iov[req->iovcnt].iov_len  = spdk_min(length, io_unit_size);
	length -= req->iov[req->iovcnt].iov_len;
	req->iovcnt++;

	return length;
}

static int
nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			 struct spdk_nvmf_transport_poll_group *group,
			 struct spdk_nvmf_transport *transport,
			 uint32_t length, uint32_t io_unit_size,
			 set_buffer_callback cb_func)
{
	uint32_t num_buffers;
	uint32_t i = 0, j;
	void *buffer, *buffers[NVMF_REQ_MAX_BUFFERS];

	/* If the number of buffers is too large, then we know the I/O is larger than allowed.
	 * Fail it.
	 */
	num_buffers = SPDK_CEIL_DIV(length, io_unit_size);
	if (num_buffers > NVMF_REQ_MAX_BUFFERS) {
		return -EINVAL;
	}

	while (i < num_buffers) {
		if (!(STAILQ_EMPTY(&group->buf_cache))) {
			group->buf_cache_count--;
			buffer = STAILQ_FIRST(&group->buf_cache);
			STAILQ_REMOVE_HEAD(&group->buf_cache, link);
			assert(buffer != NULL);

			length = cb_func(req, buffer, length, io_unit_size);
			i++;
		} else {
			if (spdk_mempool_get_bulk(transport->data_buf_pool, buffers,
						  num_buffers - i)) {
				return -ENOMEM;
			}
			for (j = 0; j < num_buffers - i; j++) {
				length = cb_func(req, buffers[j], length, io_unit_size);
			}
			i += num_buffers - i;
		}
	}

	assert(length == 0);

	return 0;
}

int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	int rc;

	req->iovcnt = 0;
	rc = nvmf_request_get_buffers(req, group, transport, length,
				      transport->opts.io_unit_size,
				      nvmf_request_set_buffer);
	if (!rc) {
		req->data_from_pool = true;
	} else if (rc == -ENOMEM) {
		spdk_nvmf_request_free_buffers(req, group, transport);
		return rc;
	}

	return rc;
}

static int
nvmf_request_set_stripped_buffer(struct spdk_nvmf_request *req, void *buf, uint32_t length,
				 uint32_t io_unit_size)
{
	struct spdk_nvmf_stripped_data *data = req->stripped_data;

	data->buffers[data->iovcnt] = buf;
	data->iov[data->iovcnt].iov_base = (void *)((uintptr_t)(buf + NVMF_DATA_BUFFER_MASK) &
					   ~NVMF_DATA_BUFFER_MASK);
	data->iov[data->iovcnt].iov_len  = spdk_min(length, io_unit_size);
	length -= data->iov[data->iovcnt].iov_len;
	data->iovcnt++;

	return length;
}

void
nvmf_request_free_stripped_buffers(struct spdk_nvmf_request *req,
				   struct spdk_nvmf_transport_poll_group *group,
				   struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_stripped_data *data = req->stripped_data;
	uint32_t i;

	for (i = 0; i < data->iovcnt; i++) {
		if (group->buf_cache_count < group->buf_cache_size) {
			STAILQ_INSERT_HEAD(&group->buf_cache,
					   (struct spdk_nvmf_transport_pg_cache_buf *)data->buffers[i],
					   link);
			group->buf_cache_count++;
		} else {
			spdk_mempool_put(transport->data_buf_pool, data->buffers[i]);
		}
	}
	free(data);
	req->stripped_data = NULL;
}

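/* For DIF insert/strip, allocate a second set of buffers that holds only the
 * data blocks with metadata stripped. The effective io_unit_size is scaled by
 * data_block_size / block_size, so each stripped buffer covers the same
 * number of logical blocks as its counterpart in req->iov. */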
int
nvmf_request_get_stripped_buffers(struct spdk_nvmf_request *req,
				  struct spdk_nvmf_transport_poll_group *group,
				  struct spdk_nvmf_transport *transport,
				  uint32_t length)
{
	uint32_t block_size = req->dif.dif_ctx.block_size;
	uint32_t data_block_size = block_size - req->dif.dif_ctx.md_size;
	uint32_t io_unit_size = transport->opts.io_unit_size / block_size * data_block_size;
	struct spdk_nvmf_stripped_data *data;
	uint32_t i;
	int rc;

	/* Data blocks must be block aligned */
	for (i = 0; i < req->iovcnt; i++) {
		if (req->iov[i].iov_len % block_size) {
			return -EINVAL;
		}
	}

	data = calloc(1, sizeof(*data));
	if (data == NULL) {
		SPDK_ERRLOG("Unable to allocate memory for stripped_data.\n");
		return -ENOMEM;
	}
	req->stripped_data = data;
	req->stripped_data->iovcnt = 0;

	rc = nvmf_request_get_buffers(req, group, transport, length, io_unit_size,
				      nvmf_request_set_stripped_buffer);
	if (rc == -ENOMEM) {
		nvmf_request_free_stripped_buffers(req, group, transport);
		return rc;
	}
	return rc;
}