/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

/*
 * NVMe transport abstraction
 */

#include "nvme_internal.h"
#include "spdk/queue.h"

#define SPDK_MAX_NUM_OF_TRANSPORTS 16

struct spdk_nvme_transport {
	struct spdk_nvme_transport_ops	ops;
	TAILQ_ENTRY(spdk_nvme_transport)	link;
};

TAILQ_HEAD(nvme_transport_list, spdk_nvme_transport) g_spdk_nvme_transports =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvme_transports);

static struct spdk_nvme_transport g_transports[SPDK_MAX_NUM_OF_TRANSPORTS] = {};
static int g_current_transport_index = 0;

struct spdk_nvme_transport_opts g_spdk_nvme_transport_opts = {
	.rdma_srq_size = 0,
	.rdma_max_cq_size = 0,
	.rdma_cm_event_timeout_ms = 1000,
	.rdma_umr_per_io = false,
};
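
/*
 * Illustrative usage (a sketch, not part of this file): an application can
 * override these defaults before creating any controllers by reading the
 * current values, modifying them, and writing them back:
 *
 *	struct spdk_nvme_transport_opts opts;
 *
 *	spdk_nvme_transport_get_opts(&opts, sizeof(opts));
 *	opts.rdma_srq_size = 1024;
 *	spdk_nvme_transport_set_opts(&opts, sizeof(opts));
 */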

const struct spdk_nvme_transport *
nvme_get_first_transport(void)
{
	return TAILQ_FIRST(&g_spdk_nvme_transports);
}

const struct spdk_nvme_transport *
nvme_get_next_transport(const struct spdk_nvme_transport *transport)
{
	return TAILQ_NEXT(transport, link);
}

/*
 * Unfortunately, due to NVMe PCIe multiprocess support, we cannot store the
 * transport object in either the controller struct or the admin qpair. This means
 * that many admin-related transport calls must use nvme_get_transport to look up
 * which functions to call.
 * In the I/O path, we can store the transport struct in the I/O qpairs to avoid
 * taking that performance hit.
 */
const struct spdk_nvme_transport *
nvme_get_transport(const char *transport_name)
{
	struct spdk_nvme_transport *registered_transport;

	TAILQ_FOREACH(registered_transport, &g_spdk_nvme_transports, link) {
		if (strcasecmp(transport_name, registered_transport->ops.name) == 0) {
			return registered_transport;
		}
	}

	return NULL;
}
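
/*
 * The lookup above is deliberately case-insensitive: transport names come both
 * from registered ops tables and from trtype strings (e.g. a transport
 * registered as "PCIE" should match a trid carrying "PCIe"), so an exact-case
 * comparison would be too brittle.
 */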

bool
spdk_nvme_transport_available(enum spdk_nvme_transport_type trtype)
{
	return nvme_get_transport(spdk_nvme_transport_id_trtype_str(trtype)) != NULL;
}

bool
spdk_nvme_transport_available_by_name(const char *transport_name)
{
	return nvme_get_transport(transport_name) != NULL;
}

void
spdk_nvme_transport_register(const struct spdk_nvme_transport_ops *ops)
{
	struct spdk_nvme_transport *new_transport;

	if (nvme_get_transport(ops->name)) {
		SPDK_ERRLOG("Double registering NVMe transport %s is prohibited.\n", ops->name);
		assert(false);
		return;
	}

	if (g_current_transport_index == SPDK_MAX_NUM_OF_TRANSPORTS) {
		SPDK_ERRLOG("Unable to register NVMe transport %s: transport table is full.\n", ops->name);
		assert(false);
		return;
	}
	new_transport = &g_transports[g_current_transport_index++];

	new_transport->ops = *ops;
	TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, new_transport, link);
}
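
/*
 * Transport implementations normally do not call spdk_nvme_transport_register()
 * directly; they use the SPDK_NVME_TRANSPORT_REGISTER() macro from spdk/nvme.h,
 * which invokes it from a constructor at load time.  A minimal sketch (the
 * my_* names are placeholders):
 *
 *	static const struct spdk_nvme_transport_ops my_ops = {
 *		.name = "MYTRANSPORT",
 *		.type = SPDK_NVME_TRANSPORT_CUSTOM,
 *		.ctrlr_construct = my_ctrlr_construct,
 *		...remaining callbacks...
 *	};
 *
 *	SPDK_NVME_TRANSPORT_REGISTER(my_transport, &my_ops);
 */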

struct spdk_nvme_ctrlr *
nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
			       const struct spdk_nvme_ctrlr_opts *opts,
			       void *devhandle)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(trid->trstring);
	struct spdk_nvme_ctrlr *ctrlr;

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", trid->trstring);
		return NULL;
	}

	ctrlr = transport->ops.ctrlr_construct(trid, opts, devhandle);

	return ctrlr;
}

int
nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
			  bool direct_connect)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(probe_ctx->trid.trstring);

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", probe_ctx->trid.trstring);
		return -ENOENT;
	}

	return transport->ops.ctrlr_scan(probe_ctx, direct_connect);
}

int
nvme_transport_ctrlr_scan_attached(struct spdk_nvme_probe_ctx *probe_ctx)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(probe_ctx->trid.trstring);

	if (transport == NULL) {
		SPDK_ERRLOG("Transport %s doesn't exist.\n", probe_ctx->trid.trstring);
		return -ENOENT;
	}

	if (transport->ops.ctrlr_scan_attached != NULL) {
		return transport->ops.ctrlr_scan_attached(probe_ctx);
	}
	SPDK_ERRLOG("Transport %s does not support ctrlr_scan_attached callback\n",
		    probe_ctx->trid.trstring);
	return -ENOTSUP;
}

int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_destruct(ctrlr);
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_enable(ctrlr);
}

int
nvme_transport_ctrlr_enable_interrupts(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_enable_interrupts != NULL) {
		return transport->ops.ctrlr_enable_interrupts(ctrlr);
	}

	return -ENOTSUP;
}

int
nvme_transport_ctrlr_ready(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_ready) {
		return transport->ops.ctrlr_ready(ctrlr);
	}

	return 0;
}

int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_4(ctrlr, offset, value);
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_reg_8(ctrlr, offset, value);
}

static int
nvme_queue_register_operation_completion(struct spdk_nvme_ctrlr *ctrlr, uint64_t value,
		spdk_nvme_reg_cb cb_fn, void *cb_ctx)
{
	struct nvme_register_completion *ctx;

	ctx = spdk_zmalloc(sizeof(*ctx), 0, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
	if (ctx == NULL) {
		return -ENOMEM;
	}

	ctx->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	ctx->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	ctx->cb_fn = cb_fn;
	ctx->cb_ctx = cb_ctx;
	ctx->value = value;
	ctx->pid = getpid();

	nvme_ctrlr_lock(ctrlr);
	STAILQ_INSERT_TAIL(&ctrlr->register_operations, ctx, stailq);
	nvme_ctrlr_unlock(ctrlr);

	return 0;
}

int
nvme_transport_ctrlr_set_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_set_reg_4_async == NULL) {
		rc = transport->ops.ctrlr_set_reg_4(ctrlr, offset, value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_set_reg_4_async(ctrlr, offset, value, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_set_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_set_reg_8_async == NULL) {
		rc = transport->ops.ctrlr_set_reg_8(ctrlr, offset, value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_set_reg_8_async(ctrlr, offset, value, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_get_reg_4_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	uint32_t value;
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_reg_4_async == NULL) {
		rc = transport->ops.ctrlr_get_reg_4(ctrlr, offset, &value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_get_reg_4_async(ctrlr, offset, cb_fn, cb_arg);
}

int
nvme_transport_ctrlr_get_reg_8_async(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
				     spdk_nvme_reg_cb cb_fn, void *cb_arg)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	uint64_t value;
	int rc;

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_reg_8_async == NULL) {
		rc = transport->ops.ctrlr_get_reg_8(ctrlr, offset, &value);
		if (rc != 0) {
			return rc;
		}

		return nvme_queue_register_operation_completion(ctrlr, value, cb_fn, cb_arg);
	}

	return transport->ops.ctrlr_get_reg_8_async(ctrlr, offset, cb_fn, cb_arg);
}
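
/*
 * Sketch of how a caller consumes the *_async register helpers above,
 * assuming the spdk_nvme_reg_cb signature of (ctx, value, cpl); reg_read_done
 * is a hypothetical callback:
 *
 *	static void
 *	reg_read_done(void *ctx, uint64_t value, const struct spdk_nvme_cpl *cpl)
 *	{
 *		if (spdk_nvme_cpl_is_error(cpl)) {
 *			SPDK_ERRLOG("register read failed\n");
 *			return;
 *		}
 *		...consume value...
 *	}
 *
 *	nvme_transport_ctrlr_get_reg_4_async(ctrlr, offset, reg_read_done, NULL);
 *
 * Note that when a transport lacks a native async op, the synchronous op runs
 * immediately and the completion is merely queued on ctrlr->register_operations,
 * so callers must treat the callback as deferred in both cases.
 */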

uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_xfer_size(ctrlr);
}

uint16_t
nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	return transport->ops.ctrlr_get_max_sges(ctrlr);
}

int
nvme_transport_ctrlr_reserve_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_reserve_cmb != NULL) {
		return transport->ops.ctrlr_reserve_cmb(ctrlr);
	}

	return -ENOTSUP;
}

void *
nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_map_cmb != NULL) {
		return transport->ops.ctrlr_map_cmb(ctrlr, size);
	}

	return NULL;
}

int
nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_unmap_cmb != NULL) {
		return transport->ops.ctrlr_unmap_cmb(ctrlr);
	}

	return 0;
}

int
nvme_transport_ctrlr_enable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_enable_pmr != NULL) {
		return transport->ops.ctrlr_enable_pmr(ctrlr);
	}

	return -ENOSYS;
}

int
nvme_transport_ctrlr_disable_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_disable_pmr != NULL) {
		return transport->ops.ctrlr_disable_pmr(ctrlr);
	}

	return -ENOSYS;
}

void *
nvme_transport_ctrlr_map_pmr(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_map_pmr != NULL) {
		return transport->ops.ctrlr_map_pmr(ctrlr, size);
	}

	return NULL;
}

int
nvme_transport_ctrlr_unmap_pmr(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_unmap_pmr != NULL) {
		return transport->ops.ctrlr_unmap_pmr(ctrlr);
	}

	return -ENOSYS;
}

struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
				     const struct spdk_nvme_io_qpair_opts *opts)
{
	struct spdk_nvme_qpair *qpair;
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	qpair = transport->ops.ctrlr_create_io_qpair(ctrlr, qid, opts);
	if (qpair != NULL && !nvme_qpair_is_admin_queue(qpair)) {
		qpair->transport = transport;
	}

	return qpair;
}

void
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);

	/* Do not rely on qpair->transport.  For multi-process cases, a foreign process may delete
	 * the IO qpair, in which case the transport object would be invalid (each process has its
	 * own unique transport objects since they contain function pointers).  So we look up the
	 * transport object in the delete_io_qpair case.
	 */
	rc = transport->ops.ctrlr_delete_io_qpair(ctrlr, qpair);
	if (rc != 0) {
		SPDK_ERRLOG("transport %s returned non-zero for ctrlr_delete_io_qpair op\n",
			    transport->ops.name);
		assert(false);
	}
}

static void
nvme_transport_connect_qpair_fail(struct spdk_nvme_qpair *qpair, void *unused)
{
	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;

	/* If the qpair was unable to reconnect, restore the original failure reason */
	qpair->transport_failure_reason = qpair->last_transport_failure_reason;
	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
}

int
nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);
	int rc;

	assert(transport != NULL);
	if (!nvme_qpair_is_admin_queue(qpair) && qpair->transport == NULL) {
		qpair->transport = transport;
	}

	qpair->last_transport_failure_reason = qpair->transport_failure_reason;
	qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTING);
	rc = transport->ops.ctrlr_connect_qpair(ctrlr, qpair);
	if (rc != 0) {
		goto err;
	}

	if (qpair->poll_group) {
		rc = nvme_poll_group_connect_qpair(qpair);
		if (rc) {
			goto err;
		}
	}

	if (!qpair->async) {
		/* Busy wait until the qpair exits the connecting state */
		while (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
			if (qpair->poll_group && spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
				rc = spdk_nvme_poll_group_process_completions(
					     qpair->poll_group->group, 0,
					     nvme_transport_connect_qpair_fail);
			} else {
				rc = spdk_nvme_qpair_process_completions(qpair, 0);
			}

			if (rc < 0) {
				goto err;
			}
		}
	}

	return 0;
err:
	nvme_transport_connect_qpair_fail(qpair, NULL);
	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING) {
		assert(qpair->async == true);
		/* Let the caller poll the qpair until it is actually disconnected. */
		return 0;
	}

	return rc;
}
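
/*
 * For async qpairs the function above returns while the qpair is still in
 * NVME_QPAIR_CONNECTING, and the caller finishes the connection by polling,
 * much like the synchronous branch does internally.  A minimal caller-side
 * sketch (app_qpair_ready() is a hypothetical application helper):
 *
 *	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
 *	while (!app_qpair_ready(qpair)) {
 *		if (spdk_nvme_qpair_process_completions(qpair, 0) < 0) {
 *			break;
 *		}
 *	}
 */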

void
nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	if (nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTING ||
	    nvme_qpair_get_state(qpair) == NVME_QPAIR_DISCONNECTED) {
		return;
	}

	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTING);
	assert(transport != NULL);

	if (qpair->poll_group && (qpair->active_proc == nvme_ctrlr_get_current_process(ctrlr))) {
		nvme_poll_group_disconnect_qpair(qpair);
	}

	transport->ops.ctrlr_disconnect_qpair(ctrlr, qpair);
}

int
nvme_transport_qpair_get_fd(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			    struct spdk_event_handler_opts *opts)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.qpair_get_fd != NULL) {
		return transport->ops.qpair_get_fd(qpair, opts);
	}

	return -ENOTSUP;
}

void
nvme_transport_ctrlr_disconnect_qpair_done(struct spdk_nvme_qpair *qpair)
{
	if (qpair->active_proc == nvme_ctrlr_get_current_process(qpair->ctrlr) ||
	    nvme_qpair_is_admin_queue(qpair)) {
		nvme_qpair_abort_all_queued_reqs(qpair);
	}
	nvme_qpair_set_state(qpair, NVME_QPAIR_DISCONNECTED);

	/* In interrupt mode, qpairs that are added to a poll group need an event for the
	 * disconnected-qpair handling to kick in.
	 */
	if (qpair->poll_group) {
		nvme_poll_group_write_disconnect_qpair_fd(qpair->poll_group->group);
	}
}

int
nvme_transport_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
					struct spdk_memory_domain **domains, int array_size)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	assert(transport != NULL);
	if (transport->ops.ctrlr_get_memory_domains) {
		return transport->ops.ctrlr_get_memory_domains(ctrlr, domains, array_size);
	}

	return 0;
}

void
nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		qpair->transport->ops.qpair_abort_reqs(qpair, qpair->abort_dnr);
	} else {
		transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
		assert(transport != NULL);
		transport->ops.qpair_abort_reqs(qpair, qpair->abort_dnr);
	}
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_reset(qpair);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_reset(qpair);
}

int
nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_submit_request(qpair, req);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_submit_request(qpair, req);
}

int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_process_completions(qpair, max_completions);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_process_completions(qpair, max_completions);
}

int
nvme_transport_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
				      int (*iter_fn)(struct nvme_request *req, void *arg),
				      void *arg)
{
	const struct spdk_nvme_transport *transport;

	if (spdk_likely(!nvme_qpair_is_admin_queue(qpair))) {
		return qpair->transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
	}

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	return transport->ops.qpair_iterate_requests(qpair, iter_fn, arg);
}

int
nvme_transport_qpair_authenticate(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport;

	transport = nvme_get_transport(qpair->ctrlr->trid.trstring);
	assert(transport != NULL);
	if (transport->ops.qpair_authenticate == NULL) {
		return -ENOTSUP;
	}

	return transport->ops.qpair_authenticate(qpair);
}

void
nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(qpair->ctrlr->trid.trstring);

	assert(transport != NULL);
	transport->ops.admin_qpair_abort_aers(qpair);
}

struct spdk_nvme_transport_poll_group *
nvme_transport_poll_group_create(const struct spdk_nvme_transport *transport)
{
	struct spdk_nvme_transport_poll_group *group = NULL;

	group = transport->ops.poll_group_create();
	if (group) {
		group->transport = transport;
		STAILQ_INIT(&group->connected_qpairs);
		STAILQ_INIT(&group->disconnected_qpairs);
		group->num_connected_qpairs = 0;
	}

	return group;
}

struct spdk_nvme_transport_poll_group *
nvme_transport_qpair_get_optimal_poll_group(const struct spdk_nvme_transport *transport,
		struct spdk_nvme_qpair *qpair)
{
	if (transport->ops.qpair_get_optimal_poll_group) {
		return transport->ops.qpair_get_optimal_poll_group(qpair);
	} else {
		return NULL;
	}
}

int
nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
			      struct spdk_nvme_qpair *qpair)
{
	int rc;

	rc = tgroup->transport->ops.poll_group_add(tgroup, qpair);
	if (rc == 0) {
		qpair->poll_group = tgroup;
		assert(nvme_qpair_get_state(qpair) < NVME_QPAIR_CONNECTED);
		qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
		STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
	}

	return rc;
}
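
/*
 * Typical consumption of this plumbing through the public API (a sketch;
 * error handling omitted, and disconnected_cb is a hypothetical application
 * callback):
 *
 *	struct spdk_nvme_poll_group *pg = spdk_nvme_poll_group_create(NULL, NULL);
 *
 *	spdk_nvme_poll_group_add(pg, qpair);
 *	spdk_nvme_poll_group_process_completions(pg, 0, disconnected_cb);
 *
 * A qpair always starts on the disconnected list and only moves to the
 * connected list once nvme_transport_poll_group_connect_qpair() succeeds.
 */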

int
nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
				 struct spdk_nvme_qpair *qpair)
{
	int rc __attribute__((unused));

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		return -EINVAL;
	} else if (qpair->poll_group_tailq_head != &tgroup->disconnected_qpairs) {
		return -ENOENT;
	}

	rc = tgroup->transport->ops.poll_group_remove(tgroup, qpair);
	assert(rc == 0);

	STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int64_t
nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	return tgroup->transport->ops.poll_group_process_completions(tgroup, completions_per_qpair,
			disconnected_qpair_cb);
}

void
nvme_transport_poll_group_check_disconnected_qpairs(struct spdk_nvme_transport_poll_group *tgroup,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	return tgroup->transport->ops.poll_group_check_disconnected_qpairs(tgroup,
			disconnected_qpair_cb);
}

int
nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
{
	return tgroup->transport->ops.poll_group_destroy(tgroup);
}

int
nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc __attribute__((unused));

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		rc = tgroup->transport->ops.poll_group_disconnect_qpair(qpair);
		assert(rc == 0);

		qpair->poll_group_tailq_head = &tgroup->disconnected_qpairs;
		STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
		assert(tgroup->num_connected_qpairs > 0);
		tgroup->num_connected_qpairs--;
		STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);

		return 0;
	}

	return -EINVAL;
}

int
nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_transport_poll_group *tgroup;
	int rc;

	tgroup = qpair->poll_group;

	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
		return 0;
	}

	if (qpair->poll_group_tailq_head == &tgroup->disconnected_qpairs) {
		rc = tgroup->transport->ops.poll_group_connect_qpair(qpair);
		if (rc == 0) {
			qpair->poll_group_tailq_head = &tgroup->connected_qpairs;
			STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
			STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
			tgroup->num_connected_qpairs++;
		}

		return rc == -EINPROGRESS ? 0 : rc;
	}

	return -EINVAL;
}

int
nvme_transport_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
				    struct spdk_nvme_transport_poll_group_stat **stats)
{
	if (tgroup->transport->ops.poll_group_get_stats) {
		return tgroup->transport->ops.poll_group_get_stats(tgroup, stats);
	}
	return -ENOTSUP;
}

void
nvme_transport_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
				     struct spdk_nvme_transport_poll_group_stat *stats)
{
	if (tgroup->transport->ops.poll_group_free_stats) {
		tgroup->transport->ops.poll_group_free_stats(tgroup, stats);
	}
}

spdk_nvme_transport_type_t
nvme_transport_get_trtype(const struct spdk_nvme_transport *transport)
{
	return transport->ops.type;
}

void
spdk_nvme_transport_get_opts(struct spdk_nvme_transport_opts *opts, size_t opts_size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL.\n");
		return;
	}

	if (opts_size == 0) {
		SPDK_ERRLOG("opts_size should not be zero.\n");
		return;
	}

	opts->opts_size = opts_size;

#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvme_transport_opts, field) + sizeof(opts->field) <= opts_size) { \
		opts->field = g_spdk_nvme_transport_opts.field; \
	} \

	SET_FIELD(rdma_srq_size);
	SET_FIELD(rdma_max_cq_size);
	SET_FIELD(rdma_cm_event_timeout_ms);
	SET_FIELD(rdma_umr_per_io);

	/* Do not remove this statement: whenever you add a new field, update the
	 * expected size here and add a matching SET_FIELD statement above. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_transport_opts) == 24, "Incorrect size");

#undef SET_FIELD
}

int
spdk_nvme_transport_set_opts(const struct spdk_nvme_transport_opts *opts, size_t opts_size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL.\n");
		return -EINVAL;
	}

	if (opts_size == 0) {
		SPDK_ERRLOG("opts_size should not be zero.\n");
		return -EINVAL;
	}

#define SET_FIELD(field) \
	if (offsetof(struct spdk_nvme_transport_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
		g_spdk_nvme_transport_opts.field = opts->field; \
	} \

	SET_FIELD(rdma_srq_size);
	SET_FIELD(rdma_max_cq_size);
	SET_FIELD(rdma_cm_event_timeout_ms);
	SET_FIELD(rdma_umr_per_io);

	g_spdk_nvme_transport_opts.opts_size = opts->opts_size;

#undef SET_FIELD

	return 0;
}
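
/*
 * The offsetof() guard in SET_FIELD is what keeps spdk_nvme_transport_opts
 * extensible without breaking the ABI: a caller built against an older,
 * smaller version of the struct passes a smaller opts_size, so only the
 * fields that fit entirely within that size are copied and newer fields keep
 * their current values.  For example (assuming fields are only ever appended,
 * per SPDK's opts convention), an opts_size that predates rdma_umr_per_io
 * simply leaves that option at its default.
 */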

volatile struct spdk_nvme_registers *
spdk_nvme_ctrlr_get_registers(struct spdk_nvme_ctrlr *ctrlr)
{
	const struct spdk_nvme_transport *transport = nvme_get_transport(ctrlr->trid.trstring);

	if (transport == NULL) {
		/* Transport does not exist. */
		return NULL;
	}

	if (transport->ops.ctrlr_get_registers) {
		return transport->ops.ctrlr_get_registers(ctrlr);
	}

	return NULL;
}
958