/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NVMe transport abstraction
 */

#include "nvme_internal.h"
#include "spdk/queue.h"

#define SPDK_MAX_NUM_OF_TRANSPORTS 16

struct spdk_nvme_transport {
	struct spdk_nvme_transport_ops	ops;
	TAILQ_ENTRY(spdk_nvme_transport)	link;
};

TAILQ_HEAD(nvme_transport_list, spdk_nvme_transport) g_spdk_nvme_transports =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvme_transports);

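/* Statically allocated pool of transport objects, handed out in registration order. */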
struct spdk_nvme_transport g_spdk_transports[SPDK_MAX_NUM_OF_TRANSPORTS] = {};
int g_current_transport_index = 0;

const struct spdk_nvme_transport *
nvme_get_first_transport(void)
{
	return TAILQ_FIRST(&g_spdk_nvme_transports);
}

const struct spdk_nvme_transport *
nvme_get_next_transport(const struct spdk_nvme_transport *transport)
{
	return TAILQ_NEXT(transport, link);
}

/*
 * Unfortunately, due to NVMe PCIe multiprocess support, we cannot store the
 * transport object in either the controller struct or the admin qpair. This means
 * that a lot of admin-related transport calls have to call nvme_get_transport
 * in order to know which functions to call.
 * In the I/O path, we have the ability to store the transport struct in the I/O
 * qpairs to avoid taking a performance hit.
 */
const struct spdk_nvme_transport *
nvme_get_transport(const char *transport_name)
{
	struct spdk_nvme_transport *registered_transport;

	TAILQ_FOREACH(registered_transport, &g_spdk_nvme_transports, link) {
		if (strcasecmp(transport_name, registered_transport->ops.name) == 0) {
			return registered_transport;
		}
	}

	return NULL;
}

bool
spdk_nvme_transport_available(enum spdk_nvme_transport_type trtype)
{
	const char *transport_name = spdk_nvme_transport_id_trtype_str(trtype);

	/* spdk_nvme_transport_id_trtype_str() returns NULL for unknown transport
	 * types; report those as unavailable rather than passing NULL on.
	 */
	if (transport_name == NULL) {
		return false;
	}

	return nvme_get_transport(transport_name) != NULL;
}

bool
spdk_nvme_transport_available_by_name(const char *transport_name)
{
	return nvme_get_transport(transport_name) != NULL;
}

void
spdk_nvme_transport_register(const struct spdk_nvme_transport_ops *ops)
{
	struct spdk_nvme_transport *new_transport;

	if (nvme_get_transport(ops->name)) {
		SPDK_ERRLOG("Double registering NVMe transport %s is prohibited.\n", ops->name);
		assert(false);
		return;
	}

	if (g_current_transport_index == SPDK_MAX_NUM_OF_TRANSPORTS) {
		SPDK_ERRLOG("Unable to register new NVMe transport.\n");
		assert(false);
		return;
	}
	new_transport = &g_spdk_transports[g_current_transport_index++];

	new_transport->ops = *ops;
	TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, new_transport, link);
}
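
/*
 * Example: registering a custom transport.  This is a minimal, hypothetical
 * sketch (the "my_*" names are illustrative only, and a real ops table must
 * fill in many more callbacks -- see struct spdk_nvme_transport_ops).
 * Transports typically use the SPDK_NVME_TRANSPORT_REGISTER() convenience
 * macro, which calls spdk_nvme_transport_register() from a constructor when
 * the library is loaded:
 *
 *	static const struct spdk_nvme_transport_ops my_ops = {
 *		.name = "MYTRANSPORT",
 *		.type = SPDK_NVME_TRANSPORT_CUSTOM,
 *		.ctrlr_construct = my_ctrlr_construct,
 *		.ctrlr_destruct = my_ctrlr_destruct,
 *		.ctrlr_scan = my_ctrlr_scan,
 *		...
 *	};
 *
 *	SPDK_NVME_TRANSPORT_REGISTER(my, &my_ops);
 */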

struct spdk_nvme_ctrlr *
nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
			       const struct spdk_nvme_ctrlr_opts *opts,
			       void *devhandle)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(trid->trstring);
	struct spdk_nvme_ctrlr *ctrlr;

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", trid->trstring);
		return NULL;
	}

	ctrlr = transport->ops.ctrlr_construct(trid, opts, devhandle);

	return ctrlr;
}

int
nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
			  bool direct_connect)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(probe_ctx->trid.trstring);

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", probe_ctx->trid.trstring);
		return -ENOENT;
	}

	return transport->ops.ctrlr_scan(probe_ctx, direct_connect);
}

int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_destruct(ctrlr);
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_enable(ctrlr);
}

int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_8(ctrlr, offset, value);
}

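/*
 * Queue a completion for a register operation that was performed
 * synchronously.  This lets the asynchronous ctrlr_{get,set}_reg_*_async API
 * be serviced by transports that only implement the synchronous callbacks:
 * the queued completion is consumed later from the controller's admin
 * completion processing path, where cb_fn is invoked with a synthetic
 * successful completion.
 */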
static int
nvme_queue_register_operation_completion(struct spdk_nvme_ctrlr *ctrlr, uint64_t value,
		spdk_nvme_reg_cb cb_fn, void *cb_ctx)
{
	struct nvme_register_completion *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		return -ENOMEM;
	}

	ctx->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	ctx->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	ctx->cb_fn = cb_fn;
	ctx->cb_ctx = cb_ctx;
	ctx->value = value;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	STAILQ_INSERT_TAIL(&ctrlr->register_operations, ctx, stailq);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return 0;
}

int
nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_set_reg_4_async == NULL) {
		rc = transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_set_reg_4_async(ctrlr, offset, value, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_set_reg_8_async == NULL) {
		rc = transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_set_reg_8_async(ctrlr, offset, value, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	uint32_t value;
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_reg_4_async == NULL) {
		rc = transport->ops.ctrlr_get_reg_4(ctrlr, offset, &value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_get_reg_4_async(ctrlr, offset, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	uint64_t value;
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_reg_8_async == NULL) {
		rc = transport->ops.ctrlr_get_reg_8(ctrlr, offset, &value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_get_reg_8_async(ctrlr, offset, cb_fn, cb_arg);
}

uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_xfer_size(ctrlr);
}

uint16_t
nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_sges(ctrlr);
}

int
nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_reserve_cmb != NULL) {
		return transport->ops.ctrlr_reserve_cmb(ctrlr);
	}

	return -ENOTSUP;
}

void *
nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_map_cmb != NULL) {
		return transport->ops.ctrlr_map_cmb(ctrlr, size);
	}

	return NULL;
}

int
nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_unmap_cmb != NULL) {
		return transport->ops.ctrlr_unmap_cmb(ctrlr);
	}

	return 0;
}

int
nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_enable_pmr != NULL) {
		return transport->ops.ctrlr_enable_pmr(ctrlr);
	}

	return -ENOSYS;
}

int
nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_disable_pmr != NULL) {
		return transport->ops.ctrlr_disable_pmr(ctrlr);
	}

	return -ENOSYS;
}

void *
nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_map_pmr != NULL) {
		return transport->ops.ctrlr_map_pmr(ctrlr, size);
	}

	return NULL;
}

int
nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_unmap_pmr != NULL) {
		return transport->ops.ctrlr_unmap_pmr(ctrlr);
	}

	return -ENOSYS;
}

struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
				     const struct spdk_nvme_io_qpair_opts *opts)
{
	struct spdk_nvme_qpair *qpair;
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	qpair = transport->ops.ctrlr_create_io_qpair(ctrlr, qid, opts);
	if (qpair != NULL && !nvme_qpair_is_admin_queue(qpair)) {
		qpair->transport = transport;
	}

	return qpair;
}

void
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);

	/* Do not rely on qpair->transport.  For multi-process cases, a foreign process may delete
	 * the I/O qpair, in which case the transport object would be invalid (each process has its
	 * own unique transport objects since they contain function pointers).  So we look up the
	 * transport object in the delete_io_qpair case.
	 */
	rc = transport->ops.ctrlr_delete_io_qpair(ctrlr, qpair);
	if (rc != 0) {
		SPDK_ERRLOG("transport %s returned non-zero for ctrlr_delete_io_qpair op\n",
			    transport->ops.name);
		assert(false);
	}
}

static void
nvme_transport_connect_qpair_fail(struct spdk_nvme_qpair *qpair, void *unused)
{
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;

	/* If the qpair was unable to reconnect, restore the original failure reason */
	qpair->transport_failure_reason = qpair->last_transport_failure_reason;
	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
}

int
nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (!nvme_qpair_is_admin_queue(qpair)) {
		qpair->transport = transport;
	}

	qpair->last_transport_failure_reason = qpair->transport_failure_reason;
	qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTING);
	rc = transport->ops.ctrlr_connect_qpair(ctrlr, qpair);
	if (rc != 0) {
		goto err;
	}

	if (qpair->poll_group) {
		rc = nvme_poll_group_connect_qpair(qpair);
		if (rc) {
			goto err;
		}
	}

	if (!qpair->async) {
		/* Busy wait until the qpair exits the connecting state */
		while (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
			if (qpair->poll_group) {
				rc = spdk_nvme_poll_group_process_completions(
					     qpair->poll_group->group, 0,
					     nvme_transport_connect_qpair_fail);
			} else {
				rc = spdk_nvme_qpair_process_completions(qpair, 0);
			}

			if (rc < 0) {
				goto err;
			}
		}
	}

	return 0;
err:
	nvme_transport_connect_qpair_fail(qpair, NULL);
	return rc;
}

void
nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING ||
	    nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED) {
		return;
	}

	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTING);
	assert(transport != NULL);
	if (qpair->poll_group) {
		nvme_poll_group_disconnect_qpair(qpair);
	}

	transport->ops.ctrlr_disconnect_qpair(ctrlr, qpair);

	nvme_qpair_abort_all_queued_reqs(qpair, 0);
	nvme_transport_qpair_abort_reqs(qpair, 0);
	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
}

int
nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
					struct spdk_memory_domain **domains, int array_size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_memory_domains) {
		return transport->ops.ctrlr_get_memory_domains(ctrlr, domains, array_size);
	}

	return 0;
}

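/*
 * The qpair operations below all share one dispatch pattern: I/O qpairs use
 * the transport pointer cached on the qpair at connect time (the fast path),
 * while admin qpairs must look the transport up by name on every call -- see
 * the multiprocess note above nvme_get_transport().
 */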
void
nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
{
	const struct spdk_nvme_transport *transport;

	assert(dnr <= 1);
	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		qpair->transport->ops.qpair_abort_reqs(qpair, dnr);
	} else {
		transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
		assert(transport != NULL);
		transport->ops.qpair_abort_reqs(qpair, dnr);
	}
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_reset(qpair);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_reset(qpair);
}

int
nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_submit_request(qpair, req);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_submit_request(qpair, req);
}

int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_process_completions(qpair, max_completions);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_process_completions(qpair, max_completions);
}

int
nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
				      int (*iter_fn)(struct nvme_request *req, void *arg),
				      void *arg)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
}

void
nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(qpair->ctrlr->trid.trstring);

	assert(transport != NULL);
	transport->ops.admin_qpair_abort_aers(qpair);
}

struct spdk_nvme_transport_poll_group *
nvme_transport_poll_group_create(const struct spdk_nvme_transport *transport)
{
	struct spdk_nvme_transport_poll_group *group = NULL;

	group = transport->ops.poll_group_create();
	if (group) {
		group->transport = transport;
		STAILQ_INIT(&group->connected_qpairs);
		STAILQ_INIT(&group->disconnected_qpairs);
	}

	return group;
}

struct spdk_nvme_transport_poll_group *
nvme_transport_qpair_get_optimal_poll_group(const struct spdk_nvme_transport *transport,
		struct spdk_nvme_qpair *qpair)
{
	if (transport->ops.qpair_get_optimal_poll_group) {
		return transport->ops.qpair_get_optimal_poll_group(qpair);
	} else {
		return NULL;
	}
}

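/*
 * Qpairs are always added to a poll group in the disconnected state; they are
 * moved onto the connected list by nvme_transport_poll_group_connect_qpair()
 * once the transport-level connect succeeds.
 */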
int
nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
			      struct spdk_nvme_qpair *qpair)
{
	int rc;

	rc = tgroup->transport->ops.poll_group_add(tgroup, qpair);
	if (rc == 0) {
		qpair->poll_group = tgroup;
		assert(nvme_qpair_get_state(qpair) < NVME_QPAIR_CONNECTED);
		qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
		STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
	}

	return rc;
}

int
nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
				 struct spdk_nvme_qpair *qpair)
{
	int rc;

	rc = tgroup->transport->ops.poll_group_remove(tgroup, qpair);
	if (rc == 0) {
		if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
			STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
		} else if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
			STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
		} else {
			return -ENOENT;
		}

		qpair->poll_group = NULL;
		qpair->poll_group_tailq_head = NULL;
	}

	return rc;
}

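/*
 * While the transport's completion callback runs, in_completion_context is
 * set so that qpair deletions are deferred (flagged via
 * delete_after_completion_context) instead of freeing a qpair the transport
 * may still be iterating over.  Any deferred deletions are processed below
 * once the callback returns.
 */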
int64_t
nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair;
	int64_t rc;

	tgroup->in_completion_context = true;
	rc = tgroup->transport->ops.poll_group_process_completions(tgroup, completions_per_qpair,
			disconnected_qpair_cb);
	tgroup->in_completion_context = false;

	if (spdk_unlikely(tgroup->num_qpairs_to_delete > 0)) {
		/* Deleted qpairs are more likely to be in the disconnected qpairs list. */
		STAILQ_FOREACH(qpair, &tgroup->disconnected_qpairs, poll_group_stailq) {
			if (spdk_unlikely(qpair->delete_after_completion_context)) {
				spdk_nvme_ctrlr_free_io_qpair(qpair);
				if (--tgroup->num_qpairs_to_delete == 0) {
					return rc;
				}
			}
		}

		STAILQ_FOREACH(qpair, &tgroup->connected_qpairs, poll_group_stailq) {
			if (spdk_unlikely(qpair->delete_after_completion_context)) {
				spdk_nvme_ctrlr_free_io_qpair(qpair);
				if (--tgroup->num_qpairs_to_delete == 0) {
					return rc;
				}
			}
		}
		/* Just in case. */
		SPDK_DEBUGLOG(nvme, "Mismatch between the number of qpairs flagged for deletion "
			      "and the number found in the poll group lists.\n");
		tgroup->num_qpairs_to_delete = 0;
	}

	return rc;
}

int
nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
{
	return tgroup->transport->ops.poll_group_destroy(tgroup);
}

int
nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc;

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		rc = tgroup->transport->ops.poll_group_disconnect_qpair(qpair);
		if (rc == 0) {
			qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
			STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
			STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
		}

		/* -EINPROGRESS indicates that a call has already been made to this function.
		 * Treating it as success just keeps us from segfaulting on a double removal/insert.
		 */
		return rc == -EINPROGRESS ? 0 : rc;
	}

	return -EINVAL;
}

int
nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc;

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		rc = tgroup->transport->ops.poll_group_connect_qpair(qpair);
		if (rc == 0) {
			qpair->poll_group_tailq_head = &tgroup->connected_qpairs;
			STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
			STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
		}

		return rc == -EINPROGRESS ? 0 : rc;
	}

	return -EINVAL;
}

int
nvme_transport_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
				    struct spdk_nvme_transport_poll_group_stat **stats)
{
	if (tgroup->transport->ops.poll_group_get_stats) {
		return tgroup->transport->ops.poll_group_get_stats(tgroup, stats);
	}

	return -ENOTSUP;
}

void
nvme_transport_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
				     struct spdk_nvme_transport_poll_group_stat *stats)
{
	if (tgroup->transport->ops.poll_group_free_stats) {
		tgroup->transport->ops.poll_group_free_stats(tgroup, stats);
	}
}

enum spdk_nvme_transport_type
nvme_transport_get_trtype(const struct spdk_nvme_transport *transport)
{
	return transport->ops.type;
}