xref: /spdk/lib/nvme/nvme_transport.c (revision f6866117acb32c78d5ea7bd76ba330284655af35)
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

/*
 * NVMe transport abstraction
 */

#include "nvme_internal.h"
#include "spdk/queue.h"

#define SPDK_MAX_NUM_OF_TRANSPORTS 16

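/*
 * Registered transports live in statically allocated slots and are linked into
 * g_spdk_nvme_transports so they can be looked up by name at runtime.
 */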
struct spdk_nvme_transport {
	struct spdk_nvme_transport_ops	ops;
	TAILQ_ENTRY(spdk_nvme_transport)	link;
};

TAILQ_HEAD(nvme_transport_list, spdk_nvme_transport) g_spdk_nvme_transports =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvme_transports);

struct spdk_nvme_transport g_spdk_transports[SPDK_MAX_NUM_OF_TRANSPORTS] = {};
int g_current_transport_index = 0;

struct spdk_nvme_transport_opts g_spdk_nvme_transport_opts = {
	.rdma_srq_size = 0,
};

const struct spdk_nvme_transport *
nvme_get_first_transport(void)
{
	return TAILQ_FIRST(&g_spdk_nvme_transports);
}

const struct spdk_nvme_transport *
nvme_get_next_transport(const struct spdk_nvme_transport *transport)
{
	return TAILQ_NEXT(transport, link);
}

/*
 * Unfortunately, due to NVMe PCIe multiprocess support, we cannot store the
 * transport object in either the controller struct or the admin qpair. This
 * means that many admin-related transport calls must use nvme_get_transport()
 * to look up which functions to call.
 * In the I/O path, we can store the transport struct in the I/O qpairs to
 * avoid taking this performance hit.
 */
const struct spdk_nvme_transport *
nvme_get_transport(const char *transport_name)
{
	struct spdk_nvme_transport *registered_transport;

	TAILQ_FOREACH(registered_transport, &g_spdk_nvme_transports, link) {
		if (strcasecmp(transport_name, registered_transport->ops.name) == 0) {
			return registered_transport;
		}
	}

	return NULL;
}

bool
spdk_nvme_transport_available(enum spdk_nvme_transport_type trtype)
{
	return nvme_get_transport(spdk_nvme_transport_id_trtype_str(trtype)) != NULL;
}

bool
spdk_nvme_transport_available_by_name(const char *transport_name)
{
	return nvme_get_transport(transport_name) != NULL;
}

void
spdk_nvme_transport_register(const struct spdk_nvme_transport_ops *ops)
{
	struct spdk_nvme_transport *new_transport;

	if (nvme_get_transport(ops->name)) {
		SPDK_ERRLOG("Double registering NVMe transport %s is prohibited.\n", ops->name);
		assert(false);
		return;
	}

	if (g_current_transport_index == SPDK_MAX_NUM_OF_TRANSPORTS) {
		SPDK_ERRLOG("Unable to register new NVMe transport.\n");
		assert(false);
		return;
	}
	new_transport = &g_spdk_transports[g_current_transport_index++];

	new_transport->ops = *ops;
	TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, new_transport, link);
}

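/*
 * Transport implementations normally register themselves at load time rather
 * than calling spdk_nvme_transport_register() directly; in SPDK this is
 * typically wrapped by the SPDK_NVME_TRANSPORT_REGISTER() constructor macro.
 * A sketch (illustrative names, not part of this file):
 *
 *	static const struct spdk_nvme_transport_ops my_ops = {
 *		.name = "MYTRANSPORT",
 *		.type = SPDK_NVME_TRANSPORT_CUSTOM,
 *		.ctrlr_construct = my_ctrlr_construct,
 *		...
 *	};
 *	SPDK_NVME_TRANSPORT_REGISTER(my_transport, &my_ops);
 */
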
struct spdk_nvme_ctrlr *
nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
			       const struct spdk_nvme_ctrlr_opts *opts,
			       void *devhandle)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(trid->trstring);
	struct spdk_nvme_ctrlr *ctrlr;

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", trid->trstring);
		return NULL;
	}

	ctrlr = transport->ops.ctrlr_construct(trid, opts, devhandle);

	return ctrlr;
}

int
nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
			  bool direct_connect)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(probe_ctx->trid.trstring);

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", probe_ctx->trid.trstring);
		return -ENOENT;
	}

	return transport->ops.ctrlr_scan(probe_ctx, direct_connect);
}

int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_destruct(ctrlr);
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_enable(ctrlr);
}

int
nvme_transport_ctrlr_ready(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_ready) {
		return transport->ops.ctrlr_ready(ctrlr);
	}

	return 0;
}

int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_8(ctrlr, offset, value);
}

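/*
 * Queue a completion for a register operation that was performed with the
 * synchronous op, so the callback still fires from the normal completion path.
 * The context is allocated from shared memory (and records the submitting pid)
 * because register operations on the controller are visible across processes.
 */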
static int
nvme_queue_register_operation_completion(struct spdk_nvme_ctrlr *ctrlr, uint64_t value,
		spdk_nvme_reg_cb cb_fn, void *cb_ctx)
{
	struct nvme_register_completion *ctx;

	ctx = spdk_zmalloc(sizeof(*ctx), 0, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
	if (ctx == NULL) {
		return -ENOMEM;
	}

	ctx->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	ctx->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	ctx->cb_fn = cb_fn;
	ctx->cb_ctx = cb_ctx;
	ctx->value = value;
	ctx->pid = getpid();

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	STAILQ_INSERT_TAIL(&ctrlr->register_operations, ctx, stailq);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return 0;
}

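/*
 * Each *_async register accessor below falls back to the transport's
 * synchronous op when no async variant is provided, queueing a completion so
 * callers see a uniform asynchronous interface either way.
 */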
int
nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_set_reg_4_async == NULL) {
		rc = transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_set_reg_4_async(ctrlr, offset, value, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_set_reg_8_async == NULL) {
		rc = transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_set_reg_8_async(ctrlr, offset, value, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	uint32_t value;
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_reg_4_async == NULL) {
		rc = transport->ops.ctrlr_get_reg_4(ctrlr, offset, &value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_get_reg_4_async(ctrlr, offset, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	uint64_t value;
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_reg_8_async == NULL) {
		rc = transport->ops.ctrlr_get_reg_8(ctrlr, offset, &value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_get_reg_8_async(ctrlr, offset, cb_fn, cb_arg);
}

uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_xfer_size(ctrlr);
}

uint16_t
nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_sges(ctrlr);
}

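/*
 * CMB and PMR support is optional.  Transports that do not implement these ops
 * fail gracefully: -ENOTSUP or -ENOSYS for the control ops, NULL for the map
 * variants, and a no-op success for CMB unmap.
 */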
int
nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_reserve_cmb != NULL) {
		return transport->ops.ctrlr_reserve_cmb(ctrlr);
	}

	return -ENOTSUP;
}

void *
nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_map_cmb != NULL) {
		return transport->ops.ctrlr_map_cmb(ctrlr, size);
	}

	return NULL;
}

int
nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_unmap_cmb != NULL) {
		return transport->ops.ctrlr_unmap_cmb(ctrlr);
	}

	return 0;
}

int
nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_enable_pmr != NULL) {
		return transport->ops.ctrlr_enable_pmr(ctrlr);
	}

	return -ENOSYS;
}

int
nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_disable_pmr != NULL) {
		return transport->ops.ctrlr_disable_pmr(ctrlr);
	}

	return -ENOSYS;
}

void *
nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_map_pmr != NULL) {
		return transport->ops.ctrlr_map_pmr(ctrlr, size);
	}

	return NULL;
}

int
nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_unmap_pmr != NULL) {
		return transport->ops.ctrlr_unmap_pmr(ctrlr);
	}

	return -ENOSYS;
}

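/*
 * I/O qpairs cache the transport pointer at creation time so the hot I/O path
 * can skip the by-name lookup (see the comment above nvme_get_transport());
 * admin qpairs always take the lookup path.
 */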
struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
				     const struct spdk_nvme_io_qpair_opts *opts)
{
	struct spdk_nvme_qpair *qpair;
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	qpair = transport->ops.ctrlr_create_io_qpair(ctrlr, qid, opts);
	if (qpair != NULL && !nvme_qpair_is_admin_queue(qpair)) {
		qpair->transport = transport;
	}

	return qpair;
}

void
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);

	/* Do not rely on qpair->transport.  For multi-process cases, a foreign process may delete
	 * the I/O qpair, in which case the transport object would be invalid (each process has its
	 * own transport objects since they contain function pointers).  So we look up the
	 * transport object in the delete_io_qpair case.
	 */
	rc = transport->ops.ctrlr_delete_io_qpair(ctrlr, qpair);
	if (rc != 0) {
		SPDK_ERRLOG("transport %s returned non-zero for ctrlr_delete_io_qpair op\n",
			    transport->ops.name);
		assert(false);
	}
}

static void
nvme_transport_connect_qpair_fail(struct spdk_nvme_qpair *qpair, void *unused)
{
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;

	/* If the qpair was unable to reconnect, restore the original failure reason */
	qpair->transport_failure_reason = qpair->last_transport_failure_reason;
	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
}

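/*
 * Connect a qpair.  Synchronous qpairs are busy-waited here until they leave
 * the CONNECTING state; asynchronous qpairs return immediately and the caller
 * polls them to completion.
 */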
int
nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (!nvme_qpair_is_admin_queue(qpair)) {
		qpair->transport = transport;
	}

	qpair->last_transport_failure_reason = qpair->transport_failure_reason;
	qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTING);
	rc = transport->ops.ctrlr_connect_qpair(ctrlr, qpair);
	if (rc != 0) {
		goto err;
	}

	if (qpair->poll_group) {
		rc = nvme_poll_group_connect_qpair(qpair);
		if (rc) {
			goto err;
		}
	}

	if (!qpair->async) {
		/* Busy wait until the qpair exits the connecting state */
		while (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
			if (qpair->poll_group && spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
				rc = spdk_nvme_poll_group_process_completions(
					     qpair->poll_group->group, 0,
					     nvme_transport_connect_qpair_fail);
			} else {
				rc = spdk_nvme_qpair_process_completions(qpair, 0);
			}

			if (rc < 0) {
				goto err;
			}
		}
	}

	return 0;
err:
	nvme_transport_connect_qpair_fail(qpair, NULL);
	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING) {
		assert(qpair->async == true);
		/* Let the caller poll the qpair until it is actually disconnected. */
		return 0;
	}

	return rc;
}

void
nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING ||
	    nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED) {
		return;
	}

	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTING);
	assert(transport != NULL);

	if (qpair->poll_group && (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr))) {
		nvme_poll_group_disconnect_qpair(qpair);
	}

	transport->ops.ctrlr_disconnect_qpair(ctrlr, qpair);
}

void
nvme_transport_ctrlr_disconnect_qpair_done(struct spdk_nvme_qpair *qpair)
{
	if (qpair->active_proc == nvme_ctrlr_get_current_process(qpair->ctrlr) ||
	    nvme_qpair_is_admin_queue(qpair)) {
		nvme_qpair_abort_all_queued_reqs(qpair, 0);
	}
	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);
}

int
nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
					struct spdk_memory_domain **domains, int array_size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_memory_domains) {
		return transport->ops.ctrlr_get_memory_domains(ctrlr, domains, array_size);
	}

	return 0;
}

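/*
 * The per-qpair entry points below use the cached transport pointer for I/O
 * qpairs and fall back to a by-name lookup for admin qpairs, which cannot
 * cache it (see the multiprocess note above nvme_get_transport()).
 */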
void
nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
{
	const struct spdk_nvme_transport *transport;

	assert(dnr <= 1);
	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		qpair->transport->ops.qpair_abort_reqs(qpair, dnr);
	} else {
		transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
		assert(transport != NULL);
		transport->ops.qpair_abort_reqs(qpair, dnr);
	}
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_reset(qpair);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_reset(qpair);
}

int
nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_submit_request(qpair, req);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_submit_request(qpair, req);
}

int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_process_completions(qpair, max_completions);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_process_completions(qpair, max_completions);
}

int
nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
				      int (*iter_fn)(struct nvme_request *req, void *arg),
				      void *arg)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
}

void
nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(qpair->ctrlr->trid.trstring);

	assert(transport != NULL);
	transport->ops.admin_qpair_abort_aers(qpair);
}

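/*
 * Within a poll group, a qpair sits on either the connected_qpairs or the
 * disconnected_qpairs list; qpair->poll_group_tailq_head records which one,
 * and effectively serves as the qpair's connection state within the group.
 */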
struct spdk_nvme_transport_poll_group *
nvme_transport_poll_group_create(const struct spdk_nvme_transport *transport)
{
	struct spdk_nvme_transport_poll_group *group = NULL;

	group = transport->ops.poll_group_create();
	if (group) {
		group->transport = transport;
		STAILQ_INIT(&group->connected_qpairs);
		STAILQ_INIT(&group->disconnected_qpairs);
	}

	return group;
}

struct spdk_nvme_transport_poll_group *
nvme_transport_qpair_get_optimal_poll_group(const struct spdk_nvme_transport *transport,
		struct spdk_nvme_qpair *qpair)
{
	if (transport->ops.qpair_get_optimal_poll_group) {
		return transport->ops.qpair_get_optimal_poll_group(qpair);
	} else {
		return NULL;
	}
}

int
nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
			      struct spdk_nvme_qpair *qpair)
{
	int rc;

	rc = tgroup->transport->ops.poll_group_add(tgroup, qpair);
	if (rc == 0) {
		qpair->poll_group = tgroup;
		assert(nvme_qpair_get_state(qpair) < NVME_QPAIR_CONNECTED);
		qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
		STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
	}

	return rc;
}

int
nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
				 struct spdk_nvme_qpair *qpair)
{
	int rc __attribute__((unused));

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		return -EINVAL;
	} else if (qpair->poll_group_tailq_head != &tgroup->disconnected_qpairs) {
		return -ENOENT;
	}

	rc = tgroup->transport->ops.poll_group_remove(tgroup, qpair);
	assert(rc == 0);

	STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int64_t
nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	return tgroup->transport->ops.poll_group_process_completions(tgroup, completions_per_qpair,
			disconnected_qpair_cb);
}

int
nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
{
	return tgroup->transport->ops.poll_group_destroy(tgroup);
}

int
nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc __attribute__((unused));

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		rc = tgroup->transport->ops.poll_group_disconnect_qpair(qpair);
		assert(rc == 0);

		qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
		STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
		STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);

		return 0;
	}

	return -EINVAL;
}

int
nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc;

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		rc = tgroup->transport->ops.poll_group_connect_qpair(qpair);
		if (rc == 0) {
			qpair->poll_group_tailq_head = &tgroup->connected_qpairs;
			STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
			STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
		}

		return rc == -EINPROGRESS ? 0 : rc;
	}

	return -EINVAL;
}

int
nvme_transport_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
				    struct spdk_nvme_transport_poll_group_stat **stats)
{
	if (tgroup->transport->ops.poll_group_get_stats) {
		return tgroup->transport->ops.poll_group_get_stats(tgroup, stats);
	}
	return -ENOTSUP;
}

void
nvme_transport_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
				     struct spdk_nvme_transport_poll_group_stat *stats)
{
	if (tgroup->transport->ops.poll_group_free_stats) {
		tgroup->transport->ops.poll_group_free_stats(tgroup, stats);
	}
}

spdk_nvme_transport_type_t
nvme_transport_get_trtype(const struct spdk_nvme_transport *transport)
{
	return transport->ops.type;
}

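/*
 * The opts getter/setter below copy fields one at a time, guarded by
 * opts_size, so applications built against an older, smaller
 * struct spdk_nvme_transport_opts keep working across library updates.
 */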
void
spdk_nvme_transport_get_opts(struct spdk_nvme_transport_opts *opts, size_t opts_size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL.\n");
		return;
	}

	if (opts_size == 0) {
		SPDK_ERRLOG("opts_size should not be zero.\n");
		return;
	}

	opts->opts_size = opts_size;

#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvme_transport_opts, field) + sizeof(opts->field) <= opts_size) { \
		opts->field = g_spdk_nvme_transport_opts.field; \
	} \

	SET_FIELD(rdma_srq_size);

	/* Do not remove this statement.  When adding a new field, update this
	 * assert and add a SET_FIELD statement for the new field. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_transport_opts) == 12, "Incorrect size");

#undef SET_FIELD
}

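/*
 * Usage sketch (illustrative values, not part of this file): an application
 * that wants a non-default RDMA shared receive queue size might do
 *
 *	struct spdk_nvme_transport_opts opts;
 *
 *	spdk_nvme_transport_get_opts(&opts, sizeof(opts));
 *	opts.rdma_srq_size = 4096;
 *	spdk_nvme_transport_set_opts(&opts, sizeof(opts));
 */
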
int
spdk_nvme_transport_set_opts(const struct spdk_nvme_transport_opts *opts, size_t opts_size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL.\n");
		return -EINVAL;
	}

	if (opts_size == 0) {
		SPDK_ERRLOG("opts_size should not be zero.\n");
		return -EINVAL;
	}

#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvme_transport_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
		g_spdk_nvme_transport_opts.field = opts->field; \
	} \

	SET_FIELD(rdma_srq_size);

	g_spdk_nvme_transport_opts.opts_size = opts->opts_size;

#undef SET_FIELD

	return 0;
}