/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

/*
 * NVMe transport abstraction
 */

#include "nvme_internal.h"
#include "spdk/queue.h"

#define SPDK_MAX_NUM_OF_TRANSPORTS 16

struct spdk_nvme_transport {
	struct spdk_nvme_transport_ops	ops;
	TAILQ_ENTRY(spdk_nvme_transport)	link;
};

TAILQ_HEAD(nvme_transport_list, spdk_nvme_transport) g_spdk_nvme_transports =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvme_transports);

struct spdk_nvme_transport g_spdk_transports[SPDK_MAX_NUM_OF_TRANSPORTS] = {};
int g_current_transport_index = 0;

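/* Process-wide defaults for the transport-level options below. They can be
 * overridden at runtime with spdk_nvme_transport_set_opts() and read back with
 * spdk_nvme_transport_get_opts(), both defined at the bottom of this file.
 */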
struct spdk_nvme_transport_opts g_spdk_nvme_transport_opts = {
	.rdma_srq_size = 0,
	.rdma_max_cq_size = 0,
	.rdma_cm_event_timeout_ms = 1000
};

const struct spdk_nvme_transport *
nvme_get_first_transport(void)
{
	return TAILQ_FIRST(&g_spdk_nvme_transports);
}

const struct spdk_nvme_transport *
nvme_get_next_transport(const struct spdk_nvme_transport *transport)
{
	return TAILQ_NEXT(transport, link);
}

/*
 * Unfortunately, due to NVMe PCIe multiprocess support, we cannot store the
 * transport object in either the controller struct or the admin qpair. This
 * means that many admin-related transport calls must look up the transport
 * with nvme_get_transport() to know which functions to call.
 * In the I/O path, we store the transport struct in the I/O qpairs to avoid
 * this per-call lookup cost.
 */
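
/*
 * For example (both snippets are taken from wrappers later in this file), the
 * admin path re-resolves the transport by name on every call:
 *
 *	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
 *
 *	transport->ops.ctrlr_enable(ctrlr);
 *
 * while the I/O path dispatches through the pointer cached on the qpair at
 * creation time:
 *
 *	qpair->transport->ops.qpair_submit_request(qpair, req);
 */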
const struct spdk_nvme_transport *
nvme_get_transport(const char *transport_name)
{
	struct spdk_nvme_transport *registered_transport;

	TAILQ_FOREACH(registered_transport, &g_spdk_nvme_transports, link) {
		if (strcasecmp(transport_name, registered_transport->ops.name) == 0) {
			return registered_transport;
		}
	}

	return NULL;
}

bool
spdk_nvme_transport_available(enum spdk_nvme_transport_type trtype)
{
	return nvme_get_transport(spdk_nvme_transport_id_trtype_str(trtype)) != NULL;
}

bool
spdk_nvme_transport_available_by_name(const char *transport_name)
{
	return nvme_get_transport(transport_name) != NULL;
}

void
spdk_nvme_transport_register(const struct spdk_nvme_transport_ops *ops)
{
	struct spdk_nvme_transport *new_transport;

	if (nvme_get_transport(ops->name)) {
		SPDK_ERRLOG("Double registering NVMe transport %s is prohibited.\n", ops->name);
		assert(false);
		/* Bail out so that release builds do not register a duplicate entry. */
		return;
	}

	if (g_current_transport_index == SPDK_MAX_NUM_OF_TRANSPORTS) {
		SPDK_ERRLOG("Unable to register new NVMe transport.\n");
		assert(false);
		return;
	}
	new_transport = &g_spdk_transports[g_current_transport_index++];

	new_transport->ops = *ops;
	TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, new_transport, link);
}
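
/*
 * A minimal registration sketch: transport implementations normally do not
 * call spdk_nvme_transport_register() directly, but use the
 * SPDK_NVME_TRANSPORT_REGISTER() constructor macro from nvme_internal.h. The
 * ops table below is hypothetical and elides most of the required callbacks:
 *
 *	static const struct spdk_nvme_transport_ops example_ops = {
 *		.name = "EXAMPLE",
 *		.type = SPDK_NVME_TRANSPORT_CUSTOM,
 *		.ctrlr_construct = example_ctrlr_construct,
 *		...
 *	};
 *
 *	SPDK_NVME_TRANSPORT_REGISTER(example, &example_ops);
 */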

struct spdk_nvme_ctrlr *
nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
			       const struct spdk_nvme_ctrlr_opts *opts,
			       void *devhandle)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(trid->trstring);
	struct spdk_nvme_ctrlr *ctrlr;

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", trid->trstring);
		return NULL;
	}

	ctrlr = transport->ops.ctrlr_construct(trid, opts, devhandle);

	return ctrlr;
}

int
nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
			  bool direct_connect)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(probe_ctx->trid.trstring);

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", probe_ctx->trid.trstring);
		return -ENOENT;
	}

	return transport->ops.ctrlr_scan(probe_ctx, direct_connect);
}

int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_destruct(ctrlr);
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_enable(ctrlr);
}

int
nvme_transport_ctrlr_ready(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_ready) {
		return transport->ops.ctrlr_ready(ctrlr);
	}

	return 0;
}

int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_8(ctrlr, offset, value);
}

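/*
 * Used by the *_reg_*_async wrappers below when a transport implements only
 * the synchronous register accessors: the synchronous access has already
 * finished, so we queue a pre-completed entry on ctrlr->register_operations
 * to be picked up later when the calling process polls the controller's admin
 * completions (entries are matched by pid, hence the getpid() tag and the
 * shared-memory allocation).
 */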
static int
nvme_queue_register_operation_completion(struct spdk_nvme_ctrlr *ctrlr, uint64_t value,
		spdk_nvme_reg_cb cb_fn, void *cb_ctx)
{
	struct nvme_register_completion *ctx;

	ctx = spdk_zmalloc(sizeof(*ctx), 0, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
	if (ctx == NULL) {
		return -ENOMEM;
	}

	ctx->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	ctx->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	ctx->cb_fn = cb_fn;
	ctx->cb_ctx = cb_ctx;
	ctx->value = value;
	ctx->pid = getpid();

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	STAILQ_INSERT_TAIL(&ctrlr->register_operations, ctx, stailq);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return 0;
}

int
nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_set_reg_4_async == NULL) {
		rc = transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_set_reg_4_async(ctrlr, offset, value, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_set_reg_8_async == NULL) {
		rc = transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_set_reg_8_async(ctrlr, offset, value, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	uint32_t value;
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_reg_4_async == NULL) {
		rc = transport->ops.ctrlr_get_reg_4(ctrlr, offset, &value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_get_reg_4_async(ctrlr, offset, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	uint64_t value;
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_reg_8_async == NULL) {
		rc = transport->ops.ctrlr_get_reg_8(ctrlr, offset, &value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_get_reg_8_async(ctrlr, offset, cb_fn, cb_arg);
}

uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_xfer_size(ctrlr);
}

uint16_t
nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_sges(ctrlr);
}

int
nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_reserve_cmb != NULL) {
		return transport->ops.ctrlr_reserve_cmb(ctrlr);
	}

	return -ENOTSUP;
}

void *
nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_map_cmb != NULL) {
		return transport->ops.ctrlr_map_cmb(ctrlr, size);
	}

	return NULL;
}

int
nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_unmap_cmb != NULL) {
		return transport->ops.ctrlr_unmap_cmb(ctrlr);
	}

	return 0;
}

int
nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_enable_pmr != NULL) {
		return transport->ops.ctrlr_enable_pmr(ctrlr);
	}

	return -ENOSYS;
}

int
nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_disable_pmr != NULL) {
		return transport->ops.ctrlr_disable_pmr(ctrlr);
	}

	return -ENOSYS;
}

void *
nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_map_pmr != NULL) {
		return transport->ops.ctrlr_map_pmr(ctrlr, size);
	}

	return NULL;
}

int
nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_unmap_pmr != NULL) {
		return transport->ops.ctrlr_unmap_pmr(ctrlr);
	}

	return -ENOSYS;
}

struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
				     const struct spdk_nvme_io_qpair_opts *opts)
{
	struct spdk_nvme_qpair *qpair;
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	qpair = transport->ops.ctrlr_create_io_qpair(ctrlr, qid, opts);
	if (qpair != NULL && !nvme_qpair_is_admin_queue(qpair)) {
		qpair->transport = transport;
	}

	return qpair;
}

void
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);

	/* Do not rely on qpair->transport.  In multi-process cases, a foreign process may delete
	 * the I/O qpair, in which case the transport object would be invalid (each process has its
	 * own unique transport objects, since they contain function pointers).  So we look up the
	 * transport object here as well.
	 */
	rc = transport->ops.ctrlr_delete_io_qpair(ctrlr, qpair);
	if (rc != 0) {
		SPDK_ERRLOG("transport %s returned non-zero for ctrlr_delete_io_qpair op\n",
			    transport->ops.name);
		assert(false);
	}
}

static void
nvme_transport_connect_qpair_fail(struct spdk_nvme_qpair *qpair, void *unused)
{
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;

	/* If the qpair was unable to reconnect, restore the original failure reason */
	qpair->transport_failure_reason = qpair->last_transport_failure_reason;
	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
}

int
nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (!nvme_qpair_is_admin_queue(qpair) && qpair->transport == NULL) {
		qpair->transport = transport;
	}

	qpair->last_transport_failure_reason = qpair->transport_failure_reason;
	qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTING);
	rc = transport->ops.ctrlr_connect_qpair(ctrlr, qpair);
	if (rc != 0) {
		goto err;
	}

	if (qpair->poll_group) {
		rc = nvme_poll_group_connect_qpair(qpair);
		if (rc) {
			goto err;
		}
	}

	if (!qpair->async) {
		/* Busy wait until the qpair exits the connecting state */
		while (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
			if (qpair->poll_group && spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
				rc = spdk_nvme_poll_group_process_completions(
					     qpair->poll_group->group, 0,
					     nvme_transport_connect_qpair_fail);
			} else {
				rc = spdk_nvme_qpair_process_completions(qpair, 0);
			}

			if (rc < 0) {
				goto err;
			}
		}
	}

	return 0;
err:
	nvme_transport_connect_qpair_fail(qpair, NULL);
	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING) {
		assert(qpair->async == true);
		/* Let the caller poll the qpair until it is actually disconnected. */
		return 0;
	}

	return rc;
}

void
nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING ||
	    nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED) {
		return;
	}

	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTING);
	assert(transport != NULL);

	if (qpair->poll_group && (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr))) {
		nvme_poll_group_disconnect_qpair(qpair);
	}

	transport->ops.ctrlr_disconnect_qpair(ctrlr, qpair);
}

void
nvme_transport_ctrlr_disconnect_qpair_done(struct spdk_nvme_qpair *qpair)
{
	if (qpair->active_proc == nvme_ctrlr_get_current_process(qpair->ctrlr) ||
	    nvme_qpair_is_admin_queue(qpair)) {
		nvme_qpair_abort_all_queued_reqs(qpair);
	}
	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
}

int
nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
					struct spdk_memory_domain **domains, int array_size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_memory_domains) {
		return transport->ops.ctrlr_get_memory_domains(ctrlr, domains, array_size);
	}

	return 0;
}

void
nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		qpair->transport->ops.qpair_abort_reqs(qpair, qpair->abort_dnr);
	} else {
		transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
		assert(transport != NULL);
		transport->ops.qpair_abort_reqs(qpair, qpair->abort_dnr);
	}
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_reset(qpair);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_reset(qpair);
}

int
nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_submit_request(qpair, req);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_submit_request(qpair, req);
}

int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_process_completions(qpair, max_completions);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_process_completions(qpair, max_completions);
}

int
nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
				      int (*iter_fn)(struct nvme_request *req, void *arg),
				      void *arg)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
}

void
nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(qpair->ctrlr->trid.trstring);

	assert(transport != NULL);
	transport->ops.admin_qpair_abort_aers(qpair);
}

struct spdk_nvme_transport_poll_group *
nvme_transport_poll_group_create(const struct spdk_nvme_transport *transport)
{
	struct spdk_nvme_transport_poll_group *group = NULL;

	group = transport->ops.poll_group_create();
	if (group) {
		group->transport = transport;
		STAILQ_INIT(&group->connected_qpairs);
		STAILQ_INIT(&group->disconnected_qpairs);
		group->num_connected_qpairs = 0;
	}

	return group;
}

struct spdk_nvme_transport_poll_group *
nvme_transport_qpair_get_optimal_poll_group(const struct spdk_nvme_transport *transport,
		struct spdk_nvme_qpair *qpair)
{
	if (transport->ops.qpair_get_optimal_poll_group) {
		return transport->ops.qpair_get_optimal_poll_group(qpair);
	} else {
		return NULL;
	}
}

int
nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
			      struct spdk_nvme_qpair *qpair)
{
	int rc;

	rc = tgroup->transport->ops.poll_group_add(tgroup, qpair);
	if (rc == 0) {
		qpair->poll_group = tgroup;
		assert(nvme_qpair_get_state(qpair) < NVME_QPAIR_CONNECTED);
		qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
		STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
	}

	return rc;
}

int
nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
				 struct spdk_nvme_qpair *qpair)
{
	int rc __attribute__((unused));

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		return -EINVAL;
	} else if (qpair->poll_group_tailq_head != &tgroup->disconnected_qpairs) {
		return -ENOENT;
	}

	rc = tgroup->transport->ops.poll_group_remove(tgroup, qpair);
	assert(rc == 0);

	STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int64_t
nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	return tgroup->transport->ops.poll_group_process_completions(tgroup, completions_per_qpair,
			disconnected_qpair_cb);
}

int
nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
{
	return tgroup->transport->ops.poll_group_destroy(tgroup);
}

int
nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc __attribute__((unused));

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		rc = tgroup->transport->ops.poll_group_disconnect_qpair(qpair);
		assert(rc == 0);

		qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
		STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
		assert(tgroup->num_connected_qpairs > 0);
		tgroup->num_connected_qpairs--;
		STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);

		return 0;
	}

	return -EINVAL;
}

int
nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc;

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		rc = tgroup->transport->ops.poll_group_connect_qpair(qpair);
		if (rc == 0) {
			qpair->poll_group_tailq_head = &tgroup->connected_qpairs;
			STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
			STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
			tgroup->num_connected_qpairs++;
		}

		return rc == -EINPROGRESS ? 0 : rc;
	}

	return -EINVAL;
}

int
nvme_transport_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
				    struct spdk_nvme_transport_poll_group_stat **stats)
{
	if (tgroup->transport->ops.poll_group_get_stats) {
		return tgroup->transport->ops.poll_group_get_stats(tgroup, stats);
	}
	return -ENOTSUP;
}

void
nvme_transport_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
				     struct spdk_nvme_transport_poll_group_stat *stats)
{
	if (tgroup->transport->ops.poll_group_free_stats) {
		tgroup->transport->ops.poll_group_free_stats(tgroup, stats);
	}
}

spdk_nvme_transport_type_t
nvme_transport_get_trtype(const struct spdk_nvme_transport *transport)
{
	return transport->ops.type;
}

void
spdk_nvme_transport_get_opts(struct spdk_nvme_transport_opts *opts, size_t opts_size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL.\n");
		return;
	}

	if (opts_size == 0) {
		SPDK_ERRLOG("opts_size should not be zero.\n");
		return;
	}

	opts->opts_size = opts_size;

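	/* Copy a field from the global options only if the caller's buffer, as
	 * declared by opts_size, is large enough to contain it. This keeps the
	 * function safe for callers built against an older, smaller version of
	 * struct spdk_nvme_transport_opts.
	 */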
#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvme_transport_opts, field) + sizeof(opts->field) <= opts_size) { \
		opts->field = g_spdk_nvme_transport_opts.field; \
	} \

	SET_FIELD(rdma_srq_size);
	SET_FIELD(rdma_max_cq_size);
	SET_FIELD(rdma_cm_event_timeout_ms);

	/* Do not remove this statement. When adding a new field, update the expected size
	 * here and add a matching SET_FIELD statement for the new field above. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_transport_opts) == 24, "Incorrect size");

#undef SET_FIELD
}

int
spdk_nvme_transport_set_opts(const struct spdk_nvme_transport_opts *opts, size_t opts_size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL.\n");
		return -EINVAL;
	}

	if (opts_size == 0) {
		SPDK_ERRLOG("opts_size should not be zero.\n");
		return -EINVAL;
	}

#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvme_transport_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
		g_spdk_nvme_transport_opts.field = opts->field; \
	} \

	SET_FIELD(rdma_srq_size);
	SET_FIELD(rdma_max_cq_size);
	SET_FIELD(rdma_cm_event_timeout_ms);

	g_spdk_nvme_transport_opts.opts_size = opts->opts_size;

#undef SET_FIELD

	return 0;
}
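
/*
 * A read-modify-write sketch for adjusting a single option without disturbing
 * the others (the value 128 is arbitrary):
 *
 *	struct spdk_nvme_transport_opts opts;
 *
 *	spdk_nvme_transport_get_opts(&opts, sizeof(opts));
 *	opts.rdma_srq_size = 128;
 *	spdk_nvme_transport_set_opts(&opts, sizeof(opts));
 */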

volatile struct spdk_nvme_registers *
spdk_nvme_ctrlr_get_registers(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	if (transport == NULL) {
		/* Transport does not exist. */
		return NULL;
	}

	if (transport->ops.ctrlr_get_registers) {
		return transport->ops.ctrlr_get_registers(ctrlr);
	}

	return NULL;
}