/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

/*
 * NVMe transport abstraction
 */

#include "nvme_internal.h"
#include "spdk/queue.h"

#define SPDK_MAX_NUM_OF_TRANSPORTS 16

struct spdk_nvme_transport {
	struct spdk_nvme_transport_ops	ops;
	TAILQ_ENTRY(spdk_nvme_transport)	link;
};

TAILQ_HEAD(nvme_transport_list, spdk_nvme_transport) g_spdk_nvme_transports =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvme_transports);

static struct spdk_nvme_transport g_transports[SPDK_MAX_NUM_OF_TRANSPORTS] = {};
static int g_current_transport_index = 0;

struct spdk_nvme_transport_opts g_spdk_nvme_transport_opts = {
	.rdma_srq_size = 0,
	.rdma_max_cq_size = 0,
	.rdma_cm_event_timeout_ms = 1000
};

const struct spdk_nvme_transport *
nvme_get_first_transport(void)
{
	return TAILQ_FIRST(&g_spdk_nvme_transports);
}

const struct spdk_nvme_transport *
nvme_get_next_transport(const struct spdk_nvme_transport *transport)
{
	return TAILQ_NEXT(transport, link);
}
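
/*
 * Usage sketch (illustrative only, not called in this file): the two accessors
 * above let internal code walk every registered transport:
 *
 *	const struct spdk_nvme_transport *t;
 *
 *	for (t = nvme_get_first_transport(); t != NULL; t = nvme_get_next_transport(t)) {
 *		printf("registered transport: %s\n", t->ops.name);
 *	}
 */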

/*
 * Unfortunately, due to NVMe PCIe multiprocess support, we cannot store the
 * transport object in either the controller struct or the admin qpair. This means
 * that a lot of admin-related transport calls have to call nvme_get_transport
 * to determine which functions to call.
 * In the I/O path, we can store the transport struct in the I/O qpairs to avoid
 * taking a performance hit.
 */
const struct spdk_nvme_transport *
nvme_get_transport(const char *transport_name)
{
	struct spdk_nvme_transport *registered_transport;

	TAILQ_FOREACH(registered_transport, &g_spdk_nvme_transports, link) {
		if (strcasecmp(transport_name, registered_transport->ops.name) == 0) {
			return registered_transport;
		}
	}

	return NULL;
}

bool
spdk_nvme_transport_available(enum spdk_nvme_transport_type trtype)
{
	return nvme_get_transport(spdk_nvme_transport_id_trtype_str(trtype)) != NULL;
}

bool
spdk_nvme_transport_available_by_name(const char *transport_name)
{
	return nvme_get_transport(transport_name) != NULL;
}

void
spdk_nvme_transport_register(const struct spdk_nvme_transport_ops *ops)
{
	struct spdk_nvme_transport *new_transport;

	if (nvme_get_transport(ops->name)) {
		SPDK_ERRLOG("Double registering NVMe transport %s is prohibited.\n", ops->name);
		assert(false);
		return;
	}

	if (g_current_transport_index == SPDK_MAX_NUM_OF_TRANSPORTS) {
		SPDK_ERRLOG("Unable to register new NVMe transport.\n");
		assert(false);
		return;
	}
	new_transport = &g_transports[g_current_transport_index++];

	new_transport->ops = *ops;
	TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, new_transport, link);
}
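
/*
 * Transports do not usually call spdk_nvme_transport_register() directly. A
 * typical pattern (a sketch, assuming the SPDK_NVME_TRANSPORT_REGISTER
 * constructor macro from spdk/nvme.h; the ops values here are hypothetical)
 * registers a static ops table at load time:
 *
 *	static const struct spdk_nvme_transport_ops my_ops = {
 *		.name = "MYTRANSPORT",
 *		.type = SPDK_NVME_TRANSPORT_CUSTOM,
 *		.ctrlr_construct = my_ctrlr_construct,
 *		... remaining mandatory callbacks ...
 *	};
 *
 *	SPDK_NVME_TRANSPORT_REGISTER(my, &my_ops);
 */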

struct spdk_nvme_ctrlr *
nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
			       const struct spdk_nvme_ctrlr_opts *opts,
			       void *devhandle)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(trid->trstring);
	struct spdk_nvme_ctrlr *ctrlr;

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", trid->trstring);
		return NULL;
	}

	ctrlr = transport->ops.ctrlr_construct(trid, opts, devhandle);

	return ctrlr;
}

int
nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
			  bool direct_connect)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(probe_ctx->trid.trstring);

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", probe_ctx->trid.trstring);
		return -ENOENT;
	}

	return transport->ops.ctrlr_scan(probe_ctx, direct_connect);
}

int
nvme_transport_ctrlr_scan_attached(struct spdk_nvme_probe_ctx *probe_ctx)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(probe_ctx->trid.trstring);

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", probe_ctx->trid.trstring);
		return -ENOENT;
	}

	if (transport->ops.ctrlr_scan_attached != NULL) {
		return transport->ops.ctrlr_scan_attached(probe_ctx);
	}
	SPDK_ERRLOG("Transport %s does not support the ctrlr_scan_attached callback\n",
		    probe_ctx->trid.trstring);
	return -ENOTSUP;
}

int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_destruct(ctrlr);
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_enable(ctrlr);
}

int
nvme_transport_ctrlr_ready(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_ready) {
		return transport->ops.ctrlr_ready(ctrlr);
	}

	return 0;
}

int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_8(ctrlr, offset, value);
}

static int
nvme_queue_register_operation_completion(struct spdk_nvme_ctrlr *ctrlr, uint64_t value,
		spdk_nvme_reg_cb cb_fn, void *cb_ctx)
{
	struct nvme_register_completion *ctx;

	ctx = spdk_zmalloc(sizeof(*ctx), 0, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
	if (ctx == NULL) {
		return -ENOMEM;
	}

	ctx->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	ctx->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	ctx->cb_fn = cb_fn;
	ctx->cb_ctx = cb_ctx;
	ctx->value = value;
	ctx->pid = getpid();

	nvme_ctrlr_lock(ctrlr);
	STAILQ_INSERT_TAIL(&ctrlr->register_operations, ctx, stailq);
	nvme_ctrlr_unlock(ctrlr);

	return 0;
}

int
nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_set_reg_4_async == NULL) {
		rc = transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_set_reg_4_async(ctrlr, offset, value, cb_fn, cb_arg);
}
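
/*
 * When a transport supplies only the synchronous ctrlr_set_reg_4 callback, the
 * wrapper above emulates the asynchronous contract: the write happens inline
 * and a synthetic completion is queued on ctrlr->register_operations, to be
 * drained later on the admin path. A hypothetical caller-side sketch (callback
 * shape per spdk_nvme_reg_cb):
 *
 *	static void
 *	set_cc_done(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
 *	{
 *		... check spdk_nvme_cpl_is_error(cpl) ...
 *	}
 *
 *	nvme_transport_ctrlr_set_reg_4_async(ctrlr,
 *					     offsetof(struct spdk_nvme_registers, cc.raw),
 *					     cc.raw, set_cc_done, NULL);
 */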

int
nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_set_reg_8_async == NULL) {
		rc = transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_set_reg_8_async(ctrlr, offset, value, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	uint32_t value;
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_reg_4_async == NULL) {
		rc = transport->ops.ctrlr_get_reg_4(ctrlr, offset, &value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_get_reg_4_async(ctrlr, offset, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	uint64_t value;
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_reg_8_async == NULL) {
		rc = transport->ops.ctrlr_get_reg_8(ctrlr, offset, &value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_get_reg_8_async(ctrlr, offset, cb_fn, cb_arg);
}

uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_xfer_size(ctrlr);
}

uint16_t
nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_sges(ctrlr);
}

int
nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_reserve_cmb != NULL) {
		return transport->ops.ctrlr_reserve_cmb(ctrlr);
	}

	return -ENOTSUP;
}

void *
nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_map_cmb != NULL) {
		return transport->ops.ctrlr_map_cmb(ctrlr, size);
	}

	return NULL;
}

int
nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_unmap_cmb != NULL) {
		return transport->ops.ctrlr_unmap_cmb(ctrlr);
	}

	return 0;
}

int
nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_enable_pmr != NULL) {
		return transport->ops.ctrlr_enable_pmr(ctrlr);
	}

	return -ENOSYS;
}

int
nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_disable_pmr != NULL) {
		return transport->ops.ctrlr_disable_pmr(ctrlr);
	}

	return -ENOSYS;
}

void *
nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_map_pmr != NULL) {
		return transport->ops.ctrlr_map_pmr(ctrlr, size);
	}

	return NULL;
}

int
nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_unmap_pmr != NULL) {
		return transport->ops.ctrlr_unmap_pmr(ctrlr);
	}

	return -ENOSYS;
}

struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
				     const struct spdk_nvme_io_qpair_opts *opts)
{
	struct spdk_nvme_qpair *qpair;
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	qpair = transport->ops.ctrlr_create_io_qpair(ctrlr, qid, opts);
	if (qpair != NULL && !nvme_qpair_is_admin_queue(qpair)) {
		qpair->transport = transport;
	}

	return qpair;
}

void
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);

	/* Do not rely on qpair->transport.  For multi-process cases, a foreign process may delete
	 * the I/O qpair, in which case the transport object would be invalid (each process has its
	 * own unique transport objects, since they contain function pointers).  So we look up the
	 * transport object here in the delete_io_qpair case.
	 */
	rc = transport->ops.ctrlr_delete_io_qpair(ctrlr, qpair);
	if (rc != 0) {
		SPDK_ERRLOG("transport %s returned non-zero for ctrlr_delete_io_qpair op\n",
			    transport->ops.name);
		assert(false);
	}
}

static void
nvme_transport_connect_qpair_fail(struct spdk_nvme_qpair *qpair, void *unused)
{
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;

	/* If the qpair was unable to reconnect, restore the original failure reason */
	qpair->transport_failure_reason = qpair->last_transport_failure_reason;
	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
}

int
nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (!nvme_qpair_is_admin_queue(qpair) && qpair->transport == NULL) {
		qpair->transport = transport;
	}

	qpair->last_transport_failure_reason = qpair->transport_failure_reason;
	qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTING);
	rc = transport->ops.ctrlr_connect_qpair(ctrlr, qpair);
	if (rc != 0) {
		goto err;
	}

	if (qpair->poll_group) {
		rc = nvme_poll_group_connect_qpair(qpair);
		if (rc) {
			goto err;
		}
	}

	if (!qpair->async) {
		/* Busy wait until the qpair exits the connecting state */
		while (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
			if (qpair->poll_group && spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
				rc = spdk_nvme_poll_group_process_completions(
					     qpair->poll_group->group, 0,
					     nvme_transport_connect_qpair_fail);
			} else {
				rc = spdk_nvme_qpair_process_completions(qpair, 0);
			}

			if (rc < 0) {
				goto err;
			}
		}
	}

	return 0;
err:
	nvme_transport_connect_qpair_fail(qpair, NULL);
	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING) {
		assert(qpair->async == true);
		/* Let the caller poll the qpair until it is actually disconnected. */
		return 0;
	}

	return rc;
}
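
/*
 * For an async qpair, a return of 0 from the function above does not mean the
 * qpair is connected yet; the caller is expected to keep polling until the
 * qpair leaves the CONNECTING state. A minimal sketch of that caller loop:
 *
 *	rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
 *	while (rc == 0 && nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
 *		rc = spdk_nvme_qpair_process_completions(qpair, 0);
 *		rc = rc < 0 ? rc : 0;
 *	}
 */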

void
nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING ||
	    nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED) {
		return;
	}

	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTING);
	assert(transport != NULL);

	if (qpair->poll_group && (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr))) {
		nvme_poll_group_disconnect_qpair(qpair);
	}

	transport->ops.ctrlr_disconnect_qpair(ctrlr, qpair);
}

void
nvme_transport_ctrlr_disconnect_qpair_done(struct spdk_nvme_qpair *qpair)
{
	if (qpair->active_proc == nvme_ctrlr_get_current_process(qpair->ctrlr) ||
	    nvme_qpair_is_admin_queue(qpair)) {
		nvme_qpair_abort_all_queued_reqs(qpair);
	}
	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
}

int
nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
					struct spdk_memory_domain **domains, int array_size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_memory_domains) {
		return transport->ops.ctrlr_get_memory_domains(ctrlr, domains, array_size);
	}

	return 0;
}

void
nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		qpair->transport->ops.qpair_abort_reqs(qpair, qpair->abort_dnr);
	} else {
		transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
		assert(transport != NULL);
		transport->ops.qpair_abort_reqs(qpair, qpair->abort_dnr);
	}
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_reset(qpair);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_reset(qpair);
}

int
nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_submit_request(qpair, req);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_submit_request(qpair, req);
}

int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_process_completions(qpair, max_completions);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_process_completions(qpair, max_completions);
}

int
nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
				      int (*iter_fn)(struct nvme_request *req, void *arg),
				      void *arg)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
}

int
nvme_transport_qpair_authenticate(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport;

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	if (transport->ops.qpair_authenticate == NULL) {
		return -ENOTSUP;
	}

	return transport->ops.qpair_authenticate(qpair);
}

void
nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(qpair->ctrlr->trid.trstring);

	assert(transport != NULL);
	transport->ops.admin_qpair_abort_aers(qpair);
}

struct spdk_nvme_transport_poll_group *
nvme_transport_poll_group_create(const struct spdk_nvme_transport *transport)
{
	struct spdk_nvme_transport_poll_group *group = NULL;

	group = transport->ops.poll_group_create();
	if (group) {
		group->transport = transport;
		STAILQ_INIT(&group->connected_qpairs);
		STAILQ_INIT(&group->disconnected_qpairs);
		group->num_connected_qpairs = 0;
	}

	return group;
}
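
/*
 * A minimal poll-group lifecycle sketch (error handling omitted; assumes a
 * transport pointer and a disconnected_cb supplied by the caller):
 *
 *	struct spdk_nvme_transport_poll_group *tgroup;
 *
 *	tgroup = nvme_transport_poll_group_create(transport);
 *	nvme_transport_poll_group_add(tgroup, qpair);
 *	nvme_transport_poll_group_process_completions(tgroup, 0, disconnected_cb);
 *	nvme_transport_poll_group_remove(tgroup, qpair);
 *	nvme_transport_poll_group_destroy(tgroup);
 */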

struct spdk_nvme_transport_poll_group *
nvme_transport_qpair_get_optimal_poll_group(const struct spdk_nvme_transport *transport,
		struct spdk_nvme_qpair *qpair)
{
	if (transport->ops.qpair_get_optimal_poll_group) {
		return transport->ops.qpair_get_optimal_poll_group(qpair);
	} else {
		return NULL;
	}
}

int
nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
			      struct spdk_nvme_qpair *qpair)
{
	int rc;

	rc = tgroup->transport->ops.poll_group_add(tgroup, qpair);
	if (rc == 0) {
		qpair->poll_group = tgroup;
		assert(nvme_qpair_get_state(qpair) < NVME_QPAIR_CONNECTED);
		qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
		STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
	}

	return rc;
}

int
nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
				 struct spdk_nvme_qpair *qpair)
{
	int rc __attribute__((unused));

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		return -EINVAL;
	} else if (qpair->poll_group_tailq_head != &tgroup->disconnected_qpairs) {
		return -ENOENT;
	}

	rc = tgroup->transport->ops.poll_group_remove(tgroup, qpair);
	assert(rc == 0);

	STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int64_t
nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	return tgroup->transport->ops.poll_group_process_completions(tgroup, completions_per_qpair,
			disconnected_qpair_cb);
}

int
nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
{
	return tgroup->transport->ops.poll_group_destroy(tgroup);
}

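/*
 * Within a poll group, a qpair sits on exactly one of two lists, tracked by
 * qpair->poll_group_tailq_head:
 *
 *	disconnected_qpairs --poll_group_connect_qpair()--> connected_qpairs
 *	connected_qpairs --poll_group_disconnect_qpair()--> disconnected_qpairs
 *
 * poll_group_add() always places a new qpair on the disconnected list; the two
 * helpers below move it between the lists and keep num_connected_qpairs in sync.
 */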
int
nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc __attribute__((unused));

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		rc = tgroup->transport->ops.poll_group_disconnect_qpair(qpair);
		assert(rc == 0);

		qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
		STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
		assert(tgroup->num_connected_qpairs > 0);
		tgroup->num_connected_qpairs--;
		STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);

		return 0;
	}

	return -EINVAL;
}

int
nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc;

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		rc = tgroup->transport->ops.poll_group_connect_qpair(qpair);
		if (rc == 0) {
			qpair->poll_group_tailq_head = &tgroup->connected_qpairs;
			STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
			STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
			tgroup->num_connected_qpairs++;
		}

		return rc == -EINPROGRESS ? 0 : rc;
	}

	return -EINVAL;
}

int
nvme_transport_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
				    struct spdk_nvme_transport_poll_group_stat **stats)
{
	if (tgroup->transport->ops.poll_group_get_stats) {
		return tgroup->transport->ops.poll_group_get_stats(tgroup, stats);
	}
	return -ENOTSUP;
}

void
nvme_transport_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
				     struct spdk_nvme_transport_poll_group_stat *stats)
{
	if (tgroup->transport->ops.poll_group_free_stats) {
		tgroup->transport->ops.poll_group_free_stats(tgroup, stats);
	}
}

spdk_nvme_transport_type_t
nvme_transport_get_trtype(const struct spdk_nvme_transport *transport)
{
	return transport->ops.type;
}

void
spdk_nvme_transport_get_opts(struct spdk_nvme_transport_opts *opts, size_t opts_size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL.\n");
		return;
	}

	if (opts_size == 0) {
		SPDK_ERRLOG("opts_size should not be zero.\n");
		return;
	}

	opts->opts_size = opts_size;

#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvme_transport_opts, field) + sizeof(opts->field) <= opts_size) { \
		opts->field = g_spdk_nvme_transport_opts.field; \
	} \

	SET_FIELD(rdma_srq_size);
	SET_FIELD(rdma_max_cq_size);
	SET_FIELD(rdma_cm_event_timeout_ms);

	/* Do not remove this statement. Always update it when adding a new field,
	 * and do not forget to add the SET_FIELD statement for the added field. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_transport_opts) == 24, "Incorrect size");

#undef SET_FIELD
}
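
/*
 * The get/set pair above supports a read-modify-write flow; opts_size keeps it
 * ABI-compatible across library versions. A sketch (the value 128 is arbitrary):
 *
 *	struct spdk_nvme_transport_opts opts;
 *
 *	spdk_nvme_transport_get_opts(&opts, sizeof(opts));
 *	opts.rdma_srq_size = 128;
 *	spdk_nvme_transport_set_opts(&opts, sizeof(opts));
 */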

int
spdk_nvme_transport_set_opts(const struct spdk_nvme_transport_opts *opts, size_t opts_size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL.\n");
		return -EINVAL;
	}

	if (opts_size == 0) {
		SPDK_ERRLOG("opts_size should not be zero.\n");
		return -EINVAL;
	}

#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvme_transport_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
		g_spdk_nvme_transport_opts.field = opts->field; \
	} \

	SET_FIELD(rdma_srq_size);
	SET_FIELD(rdma_max_cq_size);
	SET_FIELD(rdma_cm_event_timeout_ms);

	g_spdk_nvme_transport_opts.opts_size = opts->opts_size;

#undef SET_FIELD

	return 0;
}

volatile struct spdk_nvme_registers *
spdk_nvme_ctrlr_get_registers(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	if (transport == NULL) {
		/* Transport does not exist. */
		return NULL;
	}

	if (transport->ops.ctrlr_get_registers) {
		return transport->ops.ctrlr_get_registers(ctrlr);
	}

	return NULL;
}
913