/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2018-2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "nvmf_internal.h"
#include "transport.h"

#include "spdk/config.h"
#include "spdk/log.h"
#include "spdk/nvmf.h"
#include "spdk/nvmf_transport.h"
#include "spdk/queue.h"
#include "spdk/util.h"

#define MAX_MEMPOOL_NAME_LENGTH 40
#define NVMF_TRANSPORT_DEFAULT_ASSOCIATION_TIMEOUT_IN_MS 120000

struct nvmf_transport_ops_list_element {
	struct spdk_nvmf_transport_ops			ops;
	TAILQ_ENTRY(nvmf_transport_ops_list_element)	link;
};

TAILQ_HEAD(nvmf_transport_ops_list, nvmf_transport_ops_list_element)
g_spdk_nvmf_transport_ops = TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_transport_ops);

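/*
 * Find a registered transport's ops by name. The comparison is
 * case-insensitive, so e.g. "tcp" and "TCP" resolve to the same transport.
 * Returns NULL if no transport with that name has been registered.
 */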
static inline const struct spdk_nvmf_transport_ops *
nvmf_get_transport_ops(const char *transport_name)
{
	struct nvmf_transport_ops_list_element *ops;
	TAILQ_FOREACH(ops, &g_spdk_nvmf_transport_ops, link) {
		if (strcasecmp(transport_name, ops->ops.name) == 0) {
			return &ops->ops;
		}
	}
	return NULL;
}

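/*
 * Register a transport with the library. The ops structure is copied by
 * value into a global list, so the caller's copy does not need to outlive
 * this call. Registering the same name twice is a programming error: it
 * logs, asserts in debug builds, and the duplicate is otherwise ignored.
 */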
void
spdk_nvmf_transport_register(const struct spdk_nvmf_transport_ops *ops)
{
	struct nvmf_transport_ops_list_element *new_ops;

	if (nvmf_get_transport_ops(ops->name) != NULL) {
		SPDK_ERRLOG("Double registering nvmf transport type %s.\n", ops->name);
		assert(false);
		return;
	}

	new_ops = calloc(1, sizeof(*new_ops));
	if (new_ops == NULL) {
		SPDK_ERRLOG("Unable to allocate memory to register new transport type %s.\n", ops->name);
		assert(false);
		return;
	}

	new_ops->ops = *ops;

	TAILQ_INSERT_TAIL(&g_spdk_nvmf_transport_ops, new_ops, link);
}

const struct spdk_nvmf_transport_opts *
spdk_nvmf_get_transport_opts(struct spdk_nvmf_transport *transport)
{
	return &transport->opts;
}

spdk_nvme_transport_type_t
spdk_nvmf_get_transport_type(struct spdk_nvmf_transport *transport)
{
	return transport->ops->type;
}

const char *
spdk_nvmf_get_transport_name(struct spdk_nvmf_transport *transport)
{
	return transport->ops->name;
}

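/*
 * Create a transport instance: look up the registered ops by name, clamp
 * opts->max_aq_depth to the spec minimum, invoke the transport-specific
 * create callback, and allocate the shared data buffer pool
 * (num_shared_buffers buffers of io_unit_size bytes, each padded by
 * NVMF_DATA_BUFFER_ALIGNMENT so iovecs can be aligned later).
 *
 * A minimal usage sketch (assuming a transport named "TCP" has been
 * registered, e.g. via SPDK_NVMF_TRANSPORT_REGISTER):
 *
 *	struct spdk_nvmf_transport_opts opts;
 *	struct spdk_nvmf_transport *transport;
 *
 *	if (!spdk_nvmf_transport_opts_init("TCP", &opts)) {
 *		return -1;
 *	}
 *	transport = spdk_nvmf_transport_create("TCP", &opts);
 *	if (transport == NULL) {
 *		return -1;
 *	}
 */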
struct spdk_nvmf_transport *
spdk_nvmf_transport_create(const char *transport_name, struct spdk_nvmf_transport_opts *opts)
{
	const struct spdk_nvmf_transport_ops *ops = NULL;
	struct spdk_nvmf_transport *transport;
	char spdk_mempool_name[MAX_MEMPOOL_NAME_LENGTH];
	int chars_written;

	ops = nvmf_get_transport_ops(transport_name);
	if (!ops) {
		SPDK_ERRLOG("Transport type '%s' unavailable.\n", transport_name);
		return NULL;
	}

	if (opts->max_aq_depth < SPDK_NVMF_MIN_ADMIN_MAX_SQ_SIZE) {
		SPDK_ERRLOG("max_aq_depth %u is less than the minimum defined by the NVMe-oF spec; using the minimum value\n",
			    opts->max_aq_depth);
		opts->max_aq_depth = SPDK_NVMF_MIN_ADMIN_MAX_SQ_SIZE;
	}

	transport = ops->create(opts);
	if (!transport) {
		SPDK_ERRLOG("Unable to create new transport of type %s\n", transport_name);
		return NULL;
	}

	TAILQ_INIT(&transport->listeners);

	transport->ops = ops;
	transport->opts = *opts;
	chars_written = snprintf(spdk_mempool_name, MAX_MEMPOOL_NAME_LENGTH, "%s_%s_%s", "spdk_nvmf",
				 transport_name, "data");
	if (chars_written < 0) {
		SPDK_ERRLOG("Unable to generate transport data buffer pool name.\n");
		ops->destroy(transport);
		return NULL;
	}

	transport->data_buf_pool = spdk_mempool_create(spdk_mempool_name,
				   opts->num_shared_buffers,
				   opts->io_unit_size + NVMF_DATA_BUFFER_ALIGNMENT,
				   SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
				   SPDK_ENV_SOCKET_ID_ANY);

	if (!transport->data_buf_pool) {
		SPDK_ERRLOG("Unable to allocate data buffer pool for transport %s\n", transport_name);
		ops->destroy(transport);
		return NULL;
	}

	return transport;
}

struct spdk_nvmf_transport *
spdk_nvmf_transport_get_first(struct spdk_nvmf_tgt *tgt)
{
	return TAILQ_FIRST(&tgt->transports);
}

struct spdk_nvmf_transport *
spdk_nvmf_transport_get_next(struct spdk_nvmf_transport *transport)
{
	return TAILQ_NEXT(transport, link);
}

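/*
 * Destroy a transport. If any shared data buffers are still outstanding
 * (pool count below num_shared_buffers), log the leak before freeing the
 * pool, then hand the rest of the teardown to the transport-specific
 * destroy callback.
 */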
int
spdk_nvmf_transport_destroy(struct spdk_nvmf_transport *transport)
{
	if (transport->data_buf_pool != NULL) {
		if (spdk_mempool_count(transport->data_buf_pool) !=
		    transport->opts.num_shared_buffers) {
			SPDK_ERRLOG("transport buffer pool count is %zu but should be %u\n",
				    spdk_mempool_count(transport->data_buf_pool),
				    transport->opts.num_shared_buffers);
		}
	}

	spdk_mempool_free(transport->data_buf_pool);

	return transport->ops->destroy(transport);
}

struct spdk_nvmf_listener *
nvmf_transport_find_listener(struct spdk_nvmf_transport *transport,
			     const struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_listener *listener;

	TAILQ_FOREACH(listener, &transport->listeners, link) {
		if (spdk_nvme_transport_id_compare(&listener->trid, trid) == 0) {
			return listener;
		}
	}

	return NULL;
}

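/*
 * Begin listening on a transport ID. Listeners are reference counted:
 * listening on a trid that already has a listener just bumps its refcount,
 * and only the first call actually invokes the transport's listen callback.
 */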
int
spdk_nvmf_transport_listen(struct spdk_nvmf_transport *transport,
			   const struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_listener *listener;
	int rc;

	listener = nvmf_transport_find_listener(transport, trid);
	if (!listener) {
		listener = calloc(1, sizeof(*listener));
		if (!listener) {
			return -ENOMEM;
		}

		listener->ref = 1;
		listener->trid = *trid;
		TAILQ_INSERT_TAIL(&transport->listeners, listener, link);

		rc = transport->ops->listen(transport, &listener->trid);
		if (rc != 0) {
			TAILQ_REMOVE(&transport->listeners, listener, link);
			free(listener);
		}
		return rc;
	}

	++listener->ref;

	return 0;
}

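/*
 * Drop one reference on a listener. The transport's stop_listen callback
 * runs, and the listener is freed, only when the last reference goes away.
 * Returns -ENOENT if no listener exists for the given trid.
 */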
int
spdk_nvmf_transport_stop_listen(struct spdk_nvmf_transport *transport,
				const struct spdk_nvme_transport_id *trid)
{
	struct spdk_nvmf_listener *listener;

	listener = nvmf_transport_find_listener(transport, trid);
	if (!listener) {
		return -ENOENT;
	}

	if (--listener->ref == 0) {
		TAILQ_REMOVE(&transport->listeners, listener, link);
		transport->ops->stop_listen(transport, trid);
		free(listener);
	}

	return 0;
}

struct nvmf_stop_listen_ctx {
	struct spdk_nvmf_transport *transport;
	struct spdk_nvme_transport_id trid;
	spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn;
	void *cb_arg;
};

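/* Completion step of the asynchronous stop-listen sequence: runs on the
 * thread that started the spdk_for_each_channel iteration once every poll
 * group has been visited, stops the listener itself, and invokes the
 * caller's completion callback with the result.
 */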
static void
nvmf_stop_listen_fini(struct spdk_io_channel_iter *i, int status)
{
	struct nvmf_stop_listen_ctx *ctx;
	struct spdk_nvmf_transport *transport;
	int rc = status;

	ctx = spdk_io_channel_iter_get_ctx(i);
	transport = ctx->transport;
	assert(transport != NULL);

	rc = spdk_nvmf_transport_stop_listen(transport, &ctx->trid);
	if (rc) {
		SPDK_ERRLOG("Failed to stop listening on address '%s'\n", ctx->trid.traddr);
	}

	if (ctx->cb_fn) {
		ctx->cb_fn(ctx->cb_arg, rc);
	}
	free(ctx);
}

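/* Per-poll-group step of the asynchronous stop-listen sequence: disconnect
 * every qpair in this poll group that was accepted on the trid being
 * stopped.
 */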
static void
nvmf_stop_listen_disconnect_qpairs(struct spdk_io_channel_iter *i)
{
	struct nvmf_stop_listen_ctx *ctx;
	struct spdk_nvmf_poll_group *group;
	struct spdk_io_channel *ch;
	struct spdk_nvmf_qpair *qpair, *tmp_qpair;
	struct spdk_nvme_transport_id tmp_trid;

	ctx = spdk_io_channel_iter_get_ctx(i);
	ch = spdk_io_channel_iter_get_channel(i);
	group = spdk_io_channel_get_ctx(ch);

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, link, tmp_qpair) {
		/* Skip qpairs whose listen trid cannot be retrieved. */
		if (spdk_nvmf_qpair_get_listen_trid(qpair, &tmp_trid)) {
			continue;
		}

		/* Disconnect any qpair that matches the trid being stopped. */
		if (!spdk_nvme_transport_id_compare(&ctx->trid, &tmp_trid)) {
			spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
		}
	}
	spdk_for_each_channel_continue(i, 0);
}

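/*
 * Asynchronous counterpart of spdk_nvmf_transport_stop_listen. It first
 * disconnects all qpairs associated with the trid on every poll group, then
 * stops the listener and calls cb_fn. Note that the individual qpair
 * disconnects are themselves asynchronous, so they may still be completing
 * when cb_fn runs.
 */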
int
spdk_nvmf_transport_stop_listen_async(struct spdk_nvmf_transport *transport,
				      const struct spdk_nvme_transport_id *trid,
				      spdk_nvmf_tgt_subsystem_listen_done_fn cb_fn,
				      void *cb_arg)
{
	struct nvmf_stop_listen_ctx *ctx;

	ctx = calloc(1, sizeof(struct nvmf_stop_listen_ctx));
	if (ctx == NULL) {
		return -ENOMEM;
	}

	ctx->trid = *trid;
	ctx->transport = transport;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(transport->tgt, nvmf_stop_listen_disconnect_qpairs, ctx,
			      nvmf_stop_listen_fini);

	return 0;
}

uint32_t
nvmf_transport_accept(struct spdk_nvmf_transport *transport)
{
	return transport->ops->accept(transport);
}

void
nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
				 struct spdk_nvme_transport_id *trid,
				 struct spdk_nvmf_discovery_log_page_entry *entry)
{
	transport->ops->listener_discover(transport, trid, entry);
}

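/*
 * Create a transport poll group and pre-populate its per-group buffer cache
 * with up to opts.buf_cache_size buffers taken from the shared data buffer
 * pool. Cache population is best-effort: if the pool cannot supply the full
 * amount, the cache is shrunk (possibly to zero) rather than failing group
 * creation.
 */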
struct spdk_nvmf_transport_poll_group *
nvmf_transport_poll_group_create(struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_transport_pg_cache_buf **bufs;
	uint32_t i;

	group = transport->ops->poll_group_create(transport);
	if (!group) {
		return NULL;
	}
	group->transport = transport;

	STAILQ_INIT(&group->pending_buf_queue);
	STAILQ_INIT(&group->buf_cache);

	if (transport->opts.buf_cache_size) {
		group->buf_cache_size = transport->opts.buf_cache_size;
		bufs = calloc(group->buf_cache_size, sizeof(struct spdk_nvmf_transport_pg_cache_buf *));

		if (!bufs) {
			SPDK_ERRLOG("Memory allocation failed; cannot reserve buffers for the pg buffer cache\n");
			return group;
		}

		if (spdk_mempool_get_bulk(transport->data_buf_pool, (void **)bufs, group->buf_cache_size)) {
			group->buf_cache_size = (uint32_t)spdk_mempool_count(transport->data_buf_pool);
			SPDK_NOTICELOG("Unable to reserve the full number of buffers for the pg buffer cache. "
				       "Decreasing the number of cached buffers from %u to %u\n",
				       transport->opts.buf_cache_size, group->buf_cache_size);
			/* Sanity check */
			assert(group->buf_cache_size <= transport->opts.buf_cache_size);
			/* Try again with the reduced number of buffers */
			if (spdk_mempool_get_bulk(transport->data_buf_pool, (void **)bufs, group->buf_cache_size)) {
				SPDK_NOTICELOG("Failed to reserve %u buffers\n", group->buf_cache_size);
				group->buf_cache_size = 0;
			}
		}

		for (i = 0; i < group->buf_cache_size; i++) {
			STAILQ_INSERT_HEAD(&group->buf_cache, bufs[i], link);
		}
		group->buf_cache_count = group->buf_cache_size;

		free(bufs);
	}
	return group;
}

struct spdk_nvmf_transport_poll_group *
nvmf_transport_get_optimal_poll_group(struct spdk_nvmf_transport *transport,
				      struct spdk_nvmf_qpair *qpair)
{
	if (transport->ops->get_optimal_poll_group) {
		return transport->ops->get_optimal_poll_group(qpair);
	} else {
		return NULL;
	}
}

void
nvmf_transport_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
{
	struct spdk_nvmf_transport_pg_cache_buf *buf, *tmp;

	if (!STAILQ_EMPTY(&group->pending_buf_queue)) {
		SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
	}

	STAILQ_FOREACH_SAFE(buf, &group->buf_cache, link, tmp) {
		STAILQ_REMOVE(&group->buf_cache, buf, spdk_nvmf_transport_pg_cache_buf, link);
		spdk_mempool_put(group->transport->data_buf_pool, buf);
	}
	group->transport->ops->poll_group_destroy(group);
}

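/*
 * Add a qpair to a poll group. A qpair that has not yet been bound to a
 * transport is bound here; a qpair that is already bound must belong to the
 * same transport as the group, otherwise the add is rejected.
 */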
int
nvmf_transport_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_qpair *qpair)
{
	if (qpair->transport) {
		assert(qpair->transport == group->transport);
		if (qpair->transport != group->transport) {
			return -1;
		}
	} else {
		qpair->transport = group->transport;
	}

	return group->transport->ops->poll_group_add(group, qpair);
}

int
nvmf_transport_poll_group_remove(struct spdk_nvmf_transport_poll_group *group,
				 struct spdk_nvmf_qpair *qpair)
{
	/* Negative errno, matching the convention used elsewhere in this file. */
	int rc = -ENOTSUP;

	assert(qpair->transport == group->transport);
	if (group->transport->ops->poll_group_remove) {
		rc = group->transport->ops->poll_group_remove(group, qpair);
	}

	return rc;
}

int
nvmf_transport_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
{
	return group->transport->ops->poll_group_poll(group);
}

int
nvmf_transport_req_free(struct spdk_nvmf_request *req)
{
	return req->qpair->transport->ops->req_free(req);
}

int
nvmf_transport_req_complete(struct spdk_nvmf_request *req)
{
	return req->qpair->transport->ops->req_complete(req);
}

void
nvmf_transport_qpair_fini(struct spdk_nvmf_qpair *qpair)
{
	qpair->transport->ops->qpair_fini(qpair);
}

int
nvmf_transport_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
				   struct spdk_nvme_transport_id *trid)
{
	return qpair->transport->ops->qpair_get_peer_trid(qpair, trid);
}

int
nvmf_transport_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
				    struct spdk_nvme_transport_id *trid)
{
	return qpair->transport->ops->qpair_get_local_trid(qpair, trid);
}

int
nvmf_transport_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
				     struct spdk_nvme_transport_id *trid)
{
	return qpair->transport->ops->qpair_get_listen_trid(qpair, trid);
}

void
nvmf_transport_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
				   struct spdk_nvmf_request *req)
{
	qpair->transport->ops->qpair_abort_request(qpair, req);
}

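/*
 * Initialize opts with the defaults for the named transport. The library
 * sets the generic association timeout; everything else is filled in by the
 * transport's opts_init callback. Returns false if the transport name is
 * unknown.
 */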
bool
spdk_nvmf_transport_opts_init(const char *transport_name,
			      struct spdk_nvmf_transport_opts *opts)
{
	const struct spdk_nvmf_transport_ops *ops;

	ops = nvmf_get_transport_ops(transport_name);
	if (!ops) {
		SPDK_ERRLOG("Transport type %s unavailable.\n", transport_name);
		return false;
	}

	opts->association_timeout = NVMF_TRANSPORT_DEFAULT_ASSOCIATION_TIMEOUT_IN_MS;
	ops->opts_init(opts);
	return true;
}

int
spdk_nvmf_transport_poll_group_get_stat(struct spdk_nvmf_tgt *tgt,
					struct spdk_nvmf_transport *transport,
					struct spdk_nvmf_transport_poll_group_stat **stat)
{
	if (transport->ops->poll_group_get_stat) {
		return transport->ops->poll_group_get_stat(tgt, stat);
	} else {
		return -ENOTSUP;
	}
}

void
spdk_nvmf_transport_poll_group_free_stat(struct spdk_nvmf_transport *transport,
		struct spdk_nvmf_transport_poll_group_stat *stat)
{
	if (transport->ops->poll_group_free_stat) {
		transport->ops->poll_group_free_stat(stat);
	}
}

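/*
 * Release a request's data buffers. Buffers go back to the poll group's
 * cache until it is full again; any overflow is returned to the shared
 * data buffer pool.
 */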
void
spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
			       struct spdk_nvmf_transport_poll_group *group,
			       struct spdk_nvmf_transport *transport)
{
	uint32_t i;

	for (i = 0; i < req->iovcnt; i++) {
		if (group->buf_cache_count < group->buf_cache_size) {
			STAILQ_INSERT_HEAD(&group->buf_cache,
					   (struct spdk_nvmf_transport_pg_cache_buf *)req->buffers[i],
					   link);
			group->buf_cache_count++;
		} else {
			spdk_mempool_put(transport->data_buf_pool, req->buffers[i]);
		}
		req->iov[i].iov_base = NULL;
		req->buffers[i] = NULL;
		req->iov[i].iov_len = 0;
	}
	req->data_from_pool = false;
}

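/*
 * Attach one raw buffer to the request's iovec. iov_base is rounded up to
 * the next NVMF_DATA_BUFFER_ALIGNMENT boundary (the pool over-allocates by
 * that amount, so the aligned region still holds io_unit_size bytes), and
 * the iov length is capped at io_unit_size. Returns the length still left
 * to map.
 */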
static inline int
nvmf_request_set_buffer(struct spdk_nvmf_request *req, void *buf, uint32_t length,
			uint32_t io_unit_size)
{
	req->buffers[req->iovcnt] = buf;
	req->iov[req->iovcnt].iov_base = (void *)((uintptr_t)(buf + NVMF_DATA_BUFFER_MASK) &
					 ~NVMF_DATA_BUFFER_MASK);
	req->iov[req->iovcnt].iov_len  = spdk_min(length, io_unit_size);
	length -= req->iov[req->iovcnt].iov_len;
	req->iovcnt++;

	return length;
}

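/*
 * Map `length` bytes of a request onto pool buffers: take buffers from the
 * poll group cache while it has any, then bulk-allocate the remainder from
 * the shared pool. The mempool path is all-or-nothing, so on -ENOMEM the
 * caller is expected to release whatever was already attached.
 */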
static int
nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			 struct spdk_nvmf_transport_poll_group *group,
			 struct spdk_nvmf_transport *transport,
			 uint32_t length)
{
	uint32_t io_unit_size = transport->opts.io_unit_size;
	uint32_t num_buffers;
	uint32_t i = 0, j;
	void *buffer, *buffers[NVMF_REQ_MAX_BUFFERS];

	/* If the number of buffers is too large, then we know the I/O is larger than allowed.
	 *  Fail it.
	 */
	num_buffers = SPDK_CEIL_DIV(length, io_unit_size);
	if (num_buffers + req->iovcnt > NVMF_REQ_MAX_BUFFERS) {
		return -EINVAL;
	}

	while (i < num_buffers) {
		if (!(STAILQ_EMPTY(&group->buf_cache))) {
			group->buf_cache_count--;
			buffer = STAILQ_FIRST(&group->buf_cache);
			STAILQ_REMOVE_HEAD(&group->buf_cache, link);
			assert(buffer != NULL);

			length = nvmf_request_set_buffer(req, buffer, length, io_unit_size);
			i++;
		} else {
			if (spdk_mempool_get_bulk(transport->data_buf_pool, buffers,
						  num_buffers - i)) {
				return -ENOMEM;
			}
			for (j = 0; j < num_buffers - i; j++) {
				length = nvmf_request_set_buffer(req, buffers[j], length, io_unit_size);
			}
			i += num_buffers - i;
		}
	}

	assert(length == 0);

	req->data_from_pool = true;
	return 0;
}

int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	int rc;

	req->iovcnt = 0;

	rc = nvmf_request_get_buffers(req, group, transport, length);
	if (rc == -ENOMEM) {
		spdk_nvmf_request_free_buffers(req, group, transport);
	}

	return rc;
}

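/*
 * Multi-SGL variant: build one iovec covering several lengths (e.g. one per
 * SGL descriptor). On any failure, all buffers acquired so far are released
 * before returning.
 */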
int
spdk_nvmf_request_get_buffers_multi(struct spdk_nvmf_request *req,
				    struct spdk_nvmf_transport_poll_group *group,
				    struct spdk_nvmf_transport *transport,
				    uint32_t *lengths, uint32_t num_lengths)
{
	int rc = 0;
	uint32_t i;

	req->iovcnt = 0;

	for (i = 0; i < num_lengths; i++) {
		rc = nvmf_request_get_buffers(req, group, transport, lengths[i]);
		if (rc != 0) {
			goto err_exit;
		}
	}

	return 0;

err_exit:
	spdk_nvmf_request_free_buffers(req, group, transport);
	return rc;
}