1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation. All rights reserved.
5  *   Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * NVMe over RDMA transport
36  */
37 
38 #include "spdk/stdinc.h"
39 
40 #include "spdk/assert.h"
41 #include "spdk/log.h"
42 #include "spdk/trace.h"
43 #include "spdk/queue.h"
44 #include "spdk/nvme.h"
45 #include "spdk/nvmf_spec.h"
46 #include "spdk/string.h"
47 #include "spdk/endian.h"
48 #include "spdk/likely.h"
49 #include "spdk/config.h"
50 
51 #include "nvme_internal.h"
52 #include "spdk_internal/rdma.h"
53 
54 #define NVME_RDMA_TIME_OUT_IN_MS 2000
55 #define NVME_RDMA_RW_BUFFER_SIZE 131072
56 
57 /*
58  * NVME RDMA qpair Resource Defaults
59  */
60 #define NVME_RDMA_DEFAULT_TX_SGE		2
61 #define NVME_RDMA_DEFAULT_RX_SGE		1
62 
63 /* Max number of NVMe-oF SGL descriptors supported by the host */
64 #define NVME_RDMA_MAX_SGL_DESCRIPTORS		16
65 
66 /* number of STAILQ entries for holding pending RDMA CM events. */
67 #define NVME_RDMA_NUM_CM_EVENTS			256
68 
69 /* CM event processing timeout */
70 #define NVME_RDMA_QPAIR_CM_EVENT_TIMEOUT_US	1000000
71 
72 /* The default size for a shared rdma completion queue. */
73 #define DEFAULT_NVME_RDMA_CQ_SIZE		4096
74 
75 /*
76  * In the special case of a stale connection we don't expose a mechanism
77  * for the user to retry the connection so we need to handle it internally.
78  */
79 #define NVME_RDMA_STALE_CONN_RETRY_MAX		5
80 #define NVME_RDMA_STALE_CONN_RETRY_DELAY_US	10000
81 
82 /*
83  * Maximum value of transport_retry_count used by RDMA controller
84  */
85 #define NVME_RDMA_CTRLR_MAX_TRANSPORT_RETRY_COUNT	7
86 
87 /*
88  * Maximum value of transport_ack_timeout used by RDMA controller
89  */
90 #define NVME_RDMA_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT	31
91 
92 /*
93  * Number of poller cycles to keep a pointer to destroyed qpairs
94  * in the poll group.
95  */
96 #define NVME_RDMA_DESTROYED_QPAIR_EXPIRATION_CYCLES	50
97 
98 /*
99  * The maximum length of a keyed SGL data block (the SGL length field is 3 bytes wide)
100  */
101 #define NVME_RDMA_MAX_KEYED_SGL_LENGTH ((1u << 24u) - 1)
102 
103 #define WC_PER_QPAIR(queue_depth)	((queue_depth) * 2)
104 
105 enum nvme_rdma_wr_type {
106 	RDMA_WR_TYPE_RECV,
107 	RDMA_WR_TYPE_SEND,
108 };
109 
110 struct nvme_rdma_wr {
111 	/* Using this instead of the enum allows this struct to only occupy one byte. */
112 	uint8_t	type;
113 };
114 
115 struct spdk_nvmf_cmd {
116 	struct spdk_nvme_cmd cmd;
117 	struct spdk_nvme_sgl_descriptor sgl[NVME_RDMA_MAX_SGL_DESCRIPTORS];
118 };
119 
120 struct spdk_nvme_rdma_hooks g_nvme_hooks = {};
121 
122 /* STAILQ wrapper for cm events. */
123 struct nvme_rdma_cm_event_entry {
124 	struct rdma_cm_event			*evt;
125 	STAILQ_ENTRY(nvme_rdma_cm_event_entry)	link;
126 };
127 
128 /* NVMe RDMA transport extensions for spdk_nvme_ctrlr */
129 struct nvme_rdma_ctrlr {
130 	struct spdk_nvme_ctrlr			ctrlr;
131 
132 	struct ibv_pd				*pd;
133 
134 	uint16_t				max_sge;
135 
136 	struct rdma_event_channel		*cm_channel;
137 
138 	STAILQ_HEAD(, nvme_rdma_cm_event_entry)	pending_cm_events;
139 
140 	STAILQ_HEAD(, nvme_rdma_cm_event_entry)	free_cm_events;
141 
142 	struct nvme_rdma_cm_event_entry		*cm_events;
143 };
144 
145 struct nvme_rdma_destroyed_qpair {
146 	struct nvme_rdma_qpair			*destroyed_qpair_tracker;
147 	uint32_t				completed_cycles;
148 	STAILQ_ENTRY(nvme_rdma_destroyed_qpair)	link;
149 };
150 
151 struct nvme_rdma_poller_stats {
152 	uint64_t polls;
153 	uint64_t idle_polls;
154 	uint64_t queued_requests;
155 	uint64_t completions;
156 	struct spdk_rdma_qp_stats rdma_stats;
157 };
158 
159 struct nvme_rdma_poller {
160 	struct ibv_context		*device;
161 	struct ibv_cq			*cq;
162 	int				required_num_wc;
163 	int				current_num_wc;
164 	struct nvme_rdma_poller_stats	stats;
165 	STAILQ_ENTRY(nvme_rdma_poller)	link;
166 };
167 
168 struct nvme_rdma_poll_group {
169 	struct spdk_nvme_transport_poll_group		group;
170 	STAILQ_HEAD(, nvme_rdma_poller)			pollers;
171 	uint32_t					num_pollers;
172 	STAILQ_HEAD(, nvme_rdma_destroyed_qpair)	destroyed_qpairs;
173 };
174 
175 /* Memory regions */
176 union nvme_rdma_mr {
177 	struct ibv_mr	*mr;
178 	uint64_t	key;
179 };
180 
181 /* NVMe RDMA qpair extensions for spdk_nvme_qpair */
182 struct nvme_rdma_qpair {
183 	struct spdk_nvme_qpair			qpair;
184 
185 	struct spdk_rdma_qp			*rdma_qp;
186 	struct rdma_cm_id			*cm_id;
187 	struct ibv_cq				*cq;
188 
189 	struct spdk_nvme_rdma_req		*rdma_reqs;
190 
191 	uint32_t				max_send_sge;
192 
193 	uint32_t				max_recv_sge;
194 
195 	uint16_t				num_entries;
196 
197 	bool					delay_cmd_submit;
198 
199 	bool					poll_group_disconnect_in_progress;
200 
201 	uint32_t				num_completions;
202 
203 	/* Parallel arrays of response buffers + response SGLs of size num_entries */
204 	struct ibv_sge				*rsp_sgls;
205 	struct spdk_nvme_rdma_rsp		*rsps;
206 
207 	struct ibv_recv_wr			*rsp_recv_wrs;
208 
209 	/* Memory region describing all rsps for this qpair */
210 	union nvme_rdma_mr			rsp_mr;
211 
212 	/*
213 	 * Array of num_entries NVMe commands registered as RDMA message buffers.
214 	 * Indexed by rdma_req->id.
215 	 */
216 	struct spdk_nvmf_cmd			*cmds;
217 
218 	/* Memory region describing all cmds for this qpair */
219 	union nvme_rdma_mr			cmd_mr;
220 
221 	struct spdk_rdma_mem_map		*mr_map;
222 
223 	TAILQ_HEAD(, spdk_nvme_rdma_req)	free_reqs;
224 	TAILQ_HEAD(, spdk_nvme_rdma_req)	outstanding_reqs;
225 
226 	/* Counts of outstanding send and recv objects */
227 	uint16_t				current_num_recvs;
228 	uint16_t				current_num_sends;
229 
230 	/* Placed at the end of the struct since it is not used frequently */
231 	struct rdma_cm_event			*evt;
232 	struct nvme_rdma_poller			*poller;
233 
234 	/* Used by poll group to keep the qpair around until it is ready to remove it. */
235 	bool					defer_deletion_to_pg;
236 };
237 
238 enum NVME_RDMA_COMPLETION_FLAGS {
239 	NVME_RDMA_SEND_COMPLETED = 1u << 0,
240 	NVME_RDMA_RECV_COMPLETED = 1u << 1,
241 };
242 
243 struct spdk_nvme_rdma_req {
244 	uint16_t				id;
245 	uint16_t				completion_flags: 2;
246 	uint16_t				reserved: 14;
247 	/* If the RDMA_RECV completion arrives before the RDMA_SEND completion, we complete the
248 	 * nvme request while processing the RDMA_SEND completion. To complete the request we must
249 	 * know the index of the nvme_cpl received in the RDMA_RECV, so store it in this field. */
250 	uint16_t				rsp_idx;
251 
252 	struct nvme_rdma_wr			rdma_wr;
253 
254 	struct ibv_send_wr			send_wr;
255 
256 	struct nvme_request			*req;
257 
258 	struct ibv_sge				send_sgl[NVME_RDMA_DEFAULT_TX_SGE];
259 
260 	TAILQ_ENTRY(spdk_nvme_rdma_req)		link;
261 };
262 
263 struct spdk_nvme_rdma_rsp {
264 	struct spdk_nvme_cpl	cpl;
265 	struct nvme_rdma_qpair	*rqpair;
266 	uint16_t		idx;
267 	struct nvme_rdma_wr	rdma_wr;
268 };
269 
270 static const char *rdma_cm_event_str[] = {
271 	"RDMA_CM_EVENT_ADDR_RESOLVED",
272 	"RDMA_CM_EVENT_ADDR_ERROR",
273 	"RDMA_CM_EVENT_ROUTE_RESOLVED",
274 	"RDMA_CM_EVENT_ROUTE_ERROR",
275 	"RDMA_CM_EVENT_CONNECT_REQUEST",
276 	"RDMA_CM_EVENT_CONNECT_RESPONSE",
277 	"RDMA_CM_EVENT_CONNECT_ERROR",
278 	"RDMA_CM_EVENT_UNREACHABLE",
279 	"RDMA_CM_EVENT_REJECTED",
280 	"RDMA_CM_EVENT_ESTABLISHED",
281 	"RDMA_CM_EVENT_DISCONNECTED",
282 	"RDMA_CM_EVENT_DEVICE_REMOVAL",
283 	"RDMA_CM_EVENT_MULTICAST_JOIN",
284 	"RDMA_CM_EVENT_MULTICAST_ERROR",
285 	"RDMA_CM_EVENT_ADDR_CHANGE",
286 	"RDMA_CM_EVENT_TIMEWAIT_EXIT"
287 };
288 
289 struct nvme_rdma_qpair *nvme_rdma_poll_group_get_qpair_by_id(struct nvme_rdma_poll_group *group,
290 		uint32_t qp_num);
291 
292 static inline void *
293 nvme_rdma_calloc(size_t nmemb, size_t size)
294 {
295 	if (!nmemb || !size) {
296 		return NULL;
297 	}
298 
299 	if (!g_nvme_hooks.get_rkey) {
300 		return calloc(nmemb, size);
301 	} else {
302 		return spdk_zmalloc(nmemb * size, 0, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
303 	}
304 }
305 
306 static inline void
307 nvme_rdma_free(void *buf)
308 {
309 	if (!g_nvme_hooks.get_rkey) {
310 		free(buf);
311 	} else {
312 		spdk_free(buf);
313 	}
314 }
315 
316 static int nvme_rdma_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
317 		struct spdk_nvme_qpair *qpair);
318 
319 static inline struct nvme_rdma_qpair *
320 nvme_rdma_qpair(struct spdk_nvme_qpair *qpair)
321 {
322 	assert(qpair->trtype == SPDK_NVME_TRANSPORT_RDMA);
323 	return SPDK_CONTAINEROF(qpair, struct nvme_rdma_qpair, qpair);
324 }
325 
326 static inline struct nvme_rdma_poll_group *
327 nvme_rdma_poll_group(struct spdk_nvme_transport_poll_group *group)
328 {
329 	return (SPDK_CONTAINEROF(group, struct nvme_rdma_poll_group, group));
330 }
331 
332 static inline struct nvme_rdma_ctrlr *
333 nvme_rdma_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
334 {
335 	assert(ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_RDMA);
336 	return SPDK_CONTAINEROF(ctrlr, struct nvme_rdma_ctrlr, ctrlr);
337 }
338 
339 static struct spdk_nvme_rdma_req *
340 nvme_rdma_req_get(struct nvme_rdma_qpair *rqpair)
341 {
342 	struct spdk_nvme_rdma_req *rdma_req;
343 
344 	rdma_req = TAILQ_FIRST(&rqpair->free_reqs);
345 	if (rdma_req) {
346 		TAILQ_REMOVE(&rqpair->free_reqs, rdma_req, link);
347 		TAILQ_INSERT_TAIL(&rqpair->outstanding_reqs, rdma_req, link);
348 	}
349 
350 	return rdma_req;
351 }
352 
353 static void
354 nvme_rdma_req_put(struct nvme_rdma_qpair *rqpair, struct spdk_nvme_rdma_req *rdma_req)
355 {
356 	rdma_req->completion_flags = 0;
357 	rdma_req->req = NULL;
358 	TAILQ_INSERT_HEAD(&rqpair->free_reqs, rdma_req, link);
359 }
360 
361 static void
362 nvme_rdma_req_complete(struct spdk_nvme_rdma_req *rdma_req,
363 		       struct spdk_nvme_cpl *rsp)
364 {
365 	struct nvme_request *req = rdma_req->req;
366 	struct nvme_rdma_qpair *rqpair;
367 
368 	assert(req != NULL);
369 
370 	rqpair = nvme_rdma_qpair(req->qpair);
371 	TAILQ_REMOVE(&rqpair->outstanding_reqs, rdma_req, link);
372 
373 	nvme_complete_request(req->cb_fn, req->cb_arg, req->qpair, req, rsp);
374 	nvme_free_request(req);
375 }
376 
377 static const char *
378 nvme_rdma_cm_event_str_get(uint32_t event)
379 {
380 	if (event < SPDK_COUNTOF(rdma_cm_event_str)) {
381 		return rdma_cm_event_str[event];
382 	} else {
383 		return "Undefined";
384 	}
385 }
386 
387 
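/*
 * Process the CM event currently attached to the qpair (if any): complete the
 * connection on CONNECT_RESPONSE/ESTABLISHED and cap the queue depth to the
 * target's crqsize, record a transport failure reason for disconnect-type
 * events, then ack and clear the event.
 */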
388 static int
389 nvme_rdma_qpair_process_cm_event(struct nvme_rdma_qpair *rqpair)
390 {
391 	struct rdma_cm_event				*event = rqpair->evt;
392 	struct spdk_nvmf_rdma_accept_private_data	*accept_data;
393 	int						rc = 0;
394 
395 	if (event) {
396 		switch (event->event) {
397 		case RDMA_CM_EVENT_ADDR_RESOLVED:
398 		case RDMA_CM_EVENT_ADDR_ERROR:
399 		case RDMA_CM_EVENT_ROUTE_RESOLVED:
400 		case RDMA_CM_EVENT_ROUTE_ERROR:
401 			break;
402 		case RDMA_CM_EVENT_CONNECT_REQUEST:
403 			break;
404 		case RDMA_CM_EVENT_CONNECT_ERROR:
405 			break;
406 		case RDMA_CM_EVENT_UNREACHABLE:
407 		case RDMA_CM_EVENT_REJECTED:
408 			break;
409 		case RDMA_CM_EVENT_CONNECT_RESPONSE:
410 			rc = spdk_rdma_qp_complete_connect(rqpair->rdma_qp);
411 		/* fall through */
412 		case RDMA_CM_EVENT_ESTABLISHED:
413 			accept_data = (struct spdk_nvmf_rdma_accept_private_data *)event->param.conn.private_data;
414 			if (accept_data == NULL) {
415 				rc = -1;
416 			} else {
417 				SPDK_DEBUGLOG(nvme, "Requested queue depth %d. Actually got queue depth %d.\n",
418 					      rqpair->num_entries, accept_data->crqsize);
419 				rqpair->num_entries = spdk_min(rqpair->num_entries, accept_data->crqsize);
420 			}
421 			break;
422 		case RDMA_CM_EVENT_DISCONNECTED:
423 			rqpair->qpair.transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_REMOTE;
424 			break;
425 		case RDMA_CM_EVENT_DEVICE_REMOVAL:
426 			rqpair->qpair.transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
427 			break;
428 		case RDMA_CM_EVENT_MULTICAST_JOIN:
429 		case RDMA_CM_EVENT_MULTICAST_ERROR:
430 			break;
431 		case RDMA_CM_EVENT_ADDR_CHANGE:
432 			rqpair->qpair.transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_LOCAL;
433 			break;
434 		case RDMA_CM_EVENT_TIMEWAIT_EXIT:
435 			break;
436 		default:
437 			SPDK_ERRLOG("Unexpected Acceptor Event [%d]\n", event->event);
438 			break;
439 		}
440 		rqpair->evt = NULL;
441 		rdma_ack_cm_event(event);
442 	}
443 
444 	return rc;
445 }
446 
447 /*
448  * This function must be called under the nvme controller's lock
449  * because it touches global controller variables. The lock is taken
450  * by the generic transport code before invoking a few of the functions
451  * in this file: nvme_rdma_ctrlr_connect_qpair, nvme_rdma_ctrlr_delete_io_qpair,
452  * and conditionally nvme_rdma_qpair_process_completions when it is calling
453  * completions on the admin qpair. When adding a new call to this function, please
454  * verify that it is in a situation where it falls under the lock.
455  */
456 static int
457 nvme_rdma_poll_events(struct nvme_rdma_ctrlr *rctrlr)
458 {
459 	struct nvme_rdma_cm_event_entry	*entry, *tmp;
460 	struct nvme_rdma_qpair		*event_qpair;
461 	struct rdma_cm_event		*event;
462 	struct rdma_event_channel	*channel = rctrlr->cm_channel;
463 
464 	STAILQ_FOREACH_SAFE(entry, &rctrlr->pending_cm_events, link, tmp) {
465 		event_qpair = nvme_rdma_qpair(entry->evt->id->context);
466 		if (event_qpair->evt == NULL) {
467 			event_qpair->evt = entry->evt;
468 			STAILQ_REMOVE(&rctrlr->pending_cm_events, entry, nvme_rdma_cm_event_entry, link);
469 			STAILQ_INSERT_HEAD(&rctrlr->free_cm_events, entry, link);
470 		}
471 	}
472 
473 	while (rdma_get_cm_event(channel, &event) == 0) {
474 		event_qpair = nvme_rdma_qpair(event->id->context);
475 		if (event_qpair->evt == NULL) {
476 			event_qpair->evt = event;
477 		} else {
478 			assert(rctrlr == nvme_rdma_ctrlr(event_qpair->qpair.ctrlr));
479 			entry = STAILQ_FIRST(&rctrlr->free_cm_events);
480 			if (entry == NULL) {
481 				rdma_ack_cm_event(event);
482 				return -ENOMEM;
483 			}
484 			STAILQ_REMOVE(&rctrlr->free_cm_events, entry, nvme_rdma_cm_event_entry, link);
485 			entry->evt = event;
486 			STAILQ_INSERT_TAIL(&rctrlr->pending_cm_events, entry, link);
487 		}
488 	}
489 
490 	if (errno == EAGAIN || errno == EWOULDBLOCK) {
491 		return 0;
492 	} else {
493 		return errno;
494 	}
495 }
496 
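/*
 * Check a reaped CM event against the expected event type. Returns 0 on a
 * match (CONNECT_RESPONSE is also accepted when ESTABLISHED is expected),
 * -ESTALE for a stale-connection reject, and -EBADMSG otherwise.
 */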
497 static int
498 nvme_rdma_validate_cm_event(enum rdma_cm_event_type expected_evt_type,
499 			    struct rdma_cm_event *reaped_evt)
500 {
501 	int rc = -EBADMSG;
502 
503 	if (expected_evt_type == reaped_evt->event) {
504 		return 0;
505 	}
506 
507 	switch (expected_evt_type) {
508 	case RDMA_CM_EVENT_ESTABLISHED:
509 		/*
510 		 * There is an enum ib_cm_rej_reason in the kernel headers that sets 10 as
511 		 * IB_CM_REJ_STALE_CONN. I can't find the corresponding userspace constant, but we
512 		 * get the same value here.
513 		 */
514 		if (reaped_evt->event == RDMA_CM_EVENT_REJECTED && reaped_evt->status == 10) {
515 			rc = -ESTALE;
516 		} else if (reaped_evt->event == RDMA_CM_EVENT_CONNECT_RESPONSE) {
517 			/*
518 			 *  If we are using a qpair which is not created using rdma cm API
519 			 *  then we will receive RDMA_CM_EVENT_CONNECT_RESPONSE instead of
520 			 *  RDMA_CM_EVENT_ESTABLISHED.
521 			 */
522 			return 0;
523 		}
524 		break;
525 	default:
526 		break;
527 	}
528 
529 	SPDK_ERRLOG("Expected %s but received %s (%d) from CM event channel (status = %d)\n",
530 		    nvme_rdma_cm_event_str_get(expected_evt_type),
531 		    nvme_rdma_cm_event_str_get(reaped_evt->event), reaped_evt->event,
532 		    reaped_evt->status);
533 	return rc;
534 }
535 
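/*
 * Wait for the expected CM event on this qpair: process any event already
 * attached to the qpair, then poll the event channel until an event arrives
 * or NVME_RDMA_QPAIR_CM_EVENT_TIMEOUT_US expires (-EADDRNOTAVAIL on timeout),
 * and finally validate and process the reaped event.
 */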
536 static int
537 nvme_rdma_process_event(struct nvme_rdma_qpair *rqpair,
538 			struct rdma_event_channel *channel,
539 			enum rdma_cm_event_type evt)
540 {
541 	struct nvme_rdma_ctrlr	*rctrlr;
542 	uint64_t timeout_ticks;
543 	int	rc = 0, rc2;
544 
545 	if (rqpair->evt != NULL) {
546 		rc = nvme_rdma_qpair_process_cm_event(rqpair);
547 		if (rc) {
548 			return rc;
549 		}
550 	}
551 
552 	timeout_ticks = (NVME_RDMA_QPAIR_CM_EVENT_TIMEOUT_US * spdk_get_ticks_hz()) / SPDK_SEC_TO_USEC +
553 			spdk_get_ticks();
554 	rctrlr = nvme_rdma_ctrlr(rqpair->qpair.ctrlr);
555 	assert(rctrlr != NULL);
556 
557 	while (!rqpair->evt && spdk_get_ticks() < timeout_ticks && rc == 0) {
558 		rc = nvme_rdma_poll_events(rctrlr);
559 	}
560 
561 	if (rc) {
562 		return rc;
563 	}
564 
565 	if (rqpair->evt == NULL) {
566 		return -EADDRNOTAVAIL;
567 	}
568 
569 	rc = nvme_rdma_validate_cm_event(evt, rqpair->evt);
570 
571 	rc2 = nvme_rdma_qpair_process_cm_event(rqpair);
572 	/* bad message takes precedence over the other error codes from processing the event. */
573 	return rc == 0 ? rc2 : rc;
574 }
575 
576 static int
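/*
 * Create the qpair's RDMA resources: a dedicated completion queue (or the
 * poll group's shared one), the protection domain (from the user hook, if
 * provided), and the RDMA queue pair itself via spdk_rdma_qp_create().
 */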
577 nvme_rdma_qpair_init(struct nvme_rdma_qpair *rqpair)
578 {
579 	int			rc;
580 	struct spdk_rdma_qp_init_attr	attr = {};
581 	struct ibv_device_attr	dev_attr;
582 	struct nvme_rdma_ctrlr	*rctrlr;
583 
584 	rc = ibv_query_device(rqpair->cm_id->verbs, &dev_attr);
585 	if (rc != 0) {
586 		SPDK_ERRLOG("Failed to query RDMA device attributes.\n");
587 		return -1;
588 	}
589 
590 	if (rqpair->qpair.poll_group) {
591 		assert(!rqpair->cq);
592 		rc = nvme_poll_group_connect_qpair(&rqpair->qpair);
593 		if (rc) {
594 			SPDK_ERRLOG("Unable to activate the rdma qpair.\n");
595 			return -1;
596 		}
597 		assert(rqpair->cq);
598 	} else {
599 		rqpair->cq = ibv_create_cq(rqpair->cm_id->verbs, rqpair->num_entries * 2, rqpair, NULL, 0);
600 		if (!rqpair->cq) {
601 			SPDK_ERRLOG("Unable to create completion queue: errno %d: %s\n", errno, spdk_strerror(errno));
602 			return -1;
603 		}
604 	}
605 
606 	rctrlr = nvme_rdma_ctrlr(rqpair->qpair.ctrlr);
607 	if (g_nvme_hooks.get_ibv_pd) {
608 		rctrlr->pd = g_nvme_hooks.get_ibv_pd(&rctrlr->ctrlr.trid, rqpair->cm_id->verbs);
609 	} else {
610 		rctrlr->pd = NULL;
611 	}
612 
613 	attr.pd =		rctrlr->pd;
614 	attr.stats =		rqpair->poller ? &rqpair->poller->stats.rdma_stats : NULL;
615 	attr.send_cq		= rqpair->cq;
616 	attr.recv_cq		= rqpair->cq;
617 	attr.cap.max_send_wr	= rqpair->num_entries; /* SEND operations */
618 	attr.cap.max_recv_wr	= rqpair->num_entries; /* RECV operations */
619 	attr.cap.max_send_sge	= spdk_min(NVME_RDMA_DEFAULT_TX_SGE, dev_attr.max_sge);
620 	attr.cap.max_recv_sge	= spdk_min(NVME_RDMA_DEFAULT_RX_SGE, dev_attr.max_sge);
621 
622 	rqpair->rdma_qp = spdk_rdma_qp_create(rqpair->cm_id, &attr);
623 
624 	if (!rqpair->rdma_qp) {
625 		return -1;
626 	}
627 
628 	/* ibv_create_qp will change the values in attr.cap. Make sure we store the proper value. */
629 	rqpair->max_send_sge = spdk_min(NVME_RDMA_DEFAULT_TX_SGE, attr.cap.max_send_sge);
630 	rqpair->max_recv_sge = spdk_min(NVME_RDMA_DEFAULT_RX_SGE, attr.cap.max_recv_sge);
631 	rqpair->current_num_recvs = 0;
632 	rqpair->current_num_sends = 0;
633 
634 	rctrlr->pd = rqpair->rdma_qp->qp->pd;
635 
636 	rqpair->cm_id->context = &rqpair->qpair;
637 
638 	return 0;
639 }
640 
641 static inline int
642 nvme_rdma_qpair_submit_sends(struct nvme_rdma_qpair *rqpair)
643 {
644 	struct ibv_send_wr *bad_send_wr = NULL;
645 	int rc;
646 
647 	rc = spdk_rdma_qp_flush_send_wrs(rqpair->rdma_qp, &bad_send_wr);
648 
649 	if (spdk_unlikely(rc)) {
650 		SPDK_ERRLOG("Failed to post WRs on send queue, errno %d (%s), bad_wr %p\n",
651 			    rc, spdk_strerror(rc), bad_send_wr);
652 		while (bad_send_wr != NULL) {
653 			assert(rqpair->current_num_sends > 0);
654 			rqpair->current_num_sends--;
655 			bad_send_wr = bad_send_wr->next;
656 		}
657 		return rc;
658 	}
659 
660 	return 0;
661 }
662 
663 static inline int
664 nvme_rdma_qpair_submit_recvs(struct nvme_rdma_qpair *rqpair)
665 {
666 	struct ibv_recv_wr *bad_recv_wr;
667 	int rc = 0;
668 
669 	rc = spdk_rdma_qp_flush_recv_wrs(rqpair->rdma_qp, &bad_recv_wr);
670 	if (spdk_unlikely(rc)) {
671 		SPDK_ERRLOG("Failed to post WRs on receive queue, errno %d (%s), bad_wr %p\n",
672 			    rc, spdk_strerror(rc), bad_recv_wr);
673 		while (bad_recv_wr != NULL) {
674 			assert(rqpair->current_num_recvs > 0);
675 			rqpair->current_num_recvs--;
676 			bad_recv_wr = bad_recv_wr->next;
677 		}
678 	}
679 
680 	return rc;
681 }
682 
683 /* Append the given send wr structure to the qpair's outstanding sends list. */
684 /* This function accepts only a single wr. */
685 static inline int
686 nvme_rdma_qpair_queue_send_wr(struct nvme_rdma_qpair *rqpair, struct ibv_send_wr *wr)
687 {
688 	assert(wr->next == NULL);
689 
690 	assert(rqpair->current_num_sends < rqpair->num_entries);
691 
692 	rqpair->current_num_sends++;
693 	spdk_rdma_qp_queue_send_wrs(rqpair->rdma_qp, wr);
694 
695 	if (!rqpair->delay_cmd_submit) {
696 		return nvme_rdma_qpair_submit_sends(rqpair);
697 	}
698 
699 	return 0;
700 }
701 
702 /* Append the given recv wr structure to the qpair's outstanding recvs list. */
703 /* This function accepts only a single wr. */
704 static inline int
705 nvme_rdma_qpair_queue_recv_wr(struct nvme_rdma_qpair *rqpair, struct ibv_recv_wr *wr)
706 {
707 
708 	assert(wr->next == NULL);
709 	assert(rqpair->current_num_recvs < rqpair->num_entries);
710 
711 	rqpair->current_num_recvs++;
712 	spdk_rdma_qp_queue_recv_wrs(rqpair->rdma_qp, wr);
713 
714 	if (!rqpair->delay_cmd_submit) {
715 		return nvme_rdma_qpair_submit_recvs(rqpair);
716 	}
717 
718 	return 0;
719 }
720 
721 #define nvme_rdma_trace_ibv_sge(sg_list) \
722 	if (sg_list) { \
723 		SPDK_DEBUGLOG(nvme, "local addr %p length 0x%x lkey 0x%x\n", \
724 			      (void *)(sg_list)->addr, (sg_list)->length, (sg_list)->lkey); \
725 	}
726 
727 static int
728 nvme_rdma_post_recv(struct nvme_rdma_qpair *rqpair, uint16_t rsp_idx)
729 {
730 	struct ibv_recv_wr *wr;
731 
732 	wr = &rqpair->rsp_recv_wrs[rsp_idx];
733 	wr->next = NULL;
734 	nvme_rdma_trace_ibv_sge(wr->sg_list);
735 	return nvme_rdma_qpair_queue_recv_wr(rqpair, wr);
736 }
737 
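/*
 * Register a message buffer: either through rdma_reg_msgs(), or, when user
 * hooks are installed, by fetching an rkey from the get_rkey() hook.
 */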
738 static int
739 nvme_rdma_reg_mr(struct rdma_cm_id *cm_id, union nvme_rdma_mr *mr, void *mem, size_t length)
740 {
741 	if (!g_nvme_hooks.get_rkey) {
742 		mr->mr = rdma_reg_msgs(cm_id, mem, length);
743 		if (mr->mr == NULL) {
744 			SPDK_ERRLOG("Unable to register mr: %s (%d)\n",
745 				    spdk_strerror(errno), errno);
746 			return -1;
747 		}
748 	} else {
749 		mr->key = g_nvme_hooks.get_rkey(cm_id->pd, mem, length);
750 	}
751 
752 	return 0;
753 }
754 
755 static void
756 nvme_rdma_dereg_mr(union nvme_rdma_mr *mr)
757 {
758 	if (!g_nvme_hooks.get_rkey) {
759 		if (mr->mr && rdma_dereg_mr(mr->mr)) {
760 			SPDK_ERRLOG("Unable to de-register mr\n");
761 		}
762 	} else {
763 		if (mr->key) {
764 			g_nvme_hooks.put_rkey(mr->key);
765 		}
766 	}
767 	memset(mr, 0, sizeof(*mr));
768 }
769 
770 static uint32_t
771 nvme_rdma_mr_get_lkey(union nvme_rdma_mr *mr)
772 {
773 	uint32_t lkey;
774 
775 	if (!g_nvme_hooks.get_rkey) {
776 		lkey = mr->mr->lkey;
777 	} else {
778 		lkey = *((uint64_t *) mr->key);
779 	}
780 
781 	return lkey;
782 }
783 
784 static void
785 nvme_rdma_unregister_rsps(struct nvme_rdma_qpair *rqpair)
786 {
787 	nvme_rdma_dereg_mr(&rqpair->rsp_mr);
788 }
789 
790 static void
791 nvme_rdma_free_rsps(struct nvme_rdma_qpair *rqpair)
792 {
793 	nvme_rdma_free(rqpair->rsps);
794 	rqpair->rsps = NULL;
795 	nvme_rdma_free(rqpair->rsp_sgls);
796 	rqpair->rsp_sgls = NULL;
797 	nvme_rdma_free(rqpair->rsp_recv_wrs);
798 	rqpair->rsp_recv_wrs = NULL;
799 }
800 
801 static int
802 nvme_rdma_alloc_rsps(struct nvme_rdma_qpair *rqpair)
803 {
804 	rqpair->rsps = NULL;
805 	rqpair->rsp_recv_wrs = NULL;
806 
807 	rqpair->rsp_sgls = nvme_rdma_calloc(rqpair->num_entries, sizeof(*rqpair->rsp_sgls));
808 	if (!rqpair->rsp_sgls) {
809 		SPDK_ERRLOG("Failed to allocate rsp_sgls\n");
810 		goto fail;
811 	}
812 
813 	rqpair->rsp_recv_wrs = nvme_rdma_calloc(rqpair->num_entries, sizeof(*rqpair->rsp_recv_wrs));
814 	if (!rqpair->rsp_recv_wrs) {
815 		SPDK_ERRLOG("Failed to allocate rsp_recv_wrs\n");
816 		goto fail;
817 	}
818 
819 	rqpair->rsps = nvme_rdma_calloc(rqpair->num_entries, sizeof(*rqpair->rsps));
820 	if (!rqpair->rsps) {
821 		SPDK_ERRLOG("Failed to allocate rdma rsps\n");
822 		goto fail;
823 	}
824 
825 	return 0;
826 fail:
827 	nvme_rdma_free_rsps(rqpair);
828 	return -ENOMEM;
829 }
830 
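/*
 * Register the response buffers as a single memory region and pre-post one
 * receive work request per response slot.
 */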
831 static int
832 nvme_rdma_register_rsps(struct nvme_rdma_qpair *rqpair)
833 {
834 	uint16_t i;
835 	int rc;
836 	uint32_t lkey;
837 
838 	rc = nvme_rdma_reg_mr(rqpair->cm_id, &rqpair->rsp_mr,
839 			      rqpair->rsps, rqpair->num_entries * sizeof(*rqpair->rsps));
840 
841 	if (rc < 0) {
842 		goto fail;
843 	}
844 
845 	lkey = nvme_rdma_mr_get_lkey(&rqpair->rsp_mr);
846 
847 	for (i = 0; i < rqpair->num_entries; i++) {
848 		struct ibv_sge *rsp_sgl = &rqpair->rsp_sgls[i];
849 		struct spdk_nvme_rdma_rsp *rsp = &rqpair->rsps[i];
850 
851 		rsp->rqpair = rqpair;
852 		rsp->rdma_wr.type = RDMA_WR_TYPE_RECV;
853 		rsp->idx = i;
854 		rsp_sgl->addr = (uint64_t)&rqpair->rsps[i];
855 		rsp_sgl->length = sizeof(struct spdk_nvme_cpl);
856 		rsp_sgl->lkey = lkey;
857 
858 		rqpair->rsp_recv_wrs[i].wr_id = (uint64_t)&rsp->rdma_wr;
859 		rqpair->rsp_recv_wrs[i].next = NULL;
860 		rqpair->rsp_recv_wrs[i].sg_list = rsp_sgl;
861 		rqpair->rsp_recv_wrs[i].num_sge = 1;
862 
863 		rc = nvme_rdma_post_recv(rqpair, i);
864 		if (rc) {
865 			goto fail;
866 		}
867 	}
868 
869 	rc = nvme_rdma_qpair_submit_recvs(rqpair);
870 	if (rc) {
871 		goto fail;
872 	}
873 
874 	return 0;
875 
876 fail:
877 	nvme_rdma_unregister_rsps(rqpair);
878 	return rc;
879 }
880 
881 static void
882 nvme_rdma_unregister_reqs(struct nvme_rdma_qpair *rqpair)
883 {
884 	nvme_rdma_dereg_mr(&rqpair->cmd_mr);
885 }
886 
887 static void
888 nvme_rdma_free_reqs(struct nvme_rdma_qpair *rqpair)
889 {
890 	if (!rqpair->rdma_reqs) {
891 		return;
892 	}
893 
894 	nvme_rdma_free(rqpair->cmds);
895 	rqpair->cmds = NULL;
896 
897 	nvme_rdma_free(rqpair->rdma_reqs);
898 	rqpair->rdma_reqs = NULL;
899 }
900 
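/*
 * Allocate the per-qpair command buffers and request trackers, point each
 * request's send work request at its command, and place all requests on the
 * free list.
 */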
901 static int
902 nvme_rdma_alloc_reqs(struct nvme_rdma_qpair *rqpair)
903 {
904 	uint16_t i;
905 
906 	rqpair->rdma_reqs = nvme_rdma_calloc(rqpair->num_entries, sizeof(struct spdk_nvme_rdma_req));
907 	if (rqpair->rdma_reqs == NULL) {
908 		SPDK_ERRLOG("Failed to allocate rdma_reqs\n");
909 		goto fail;
910 	}
911 
912 	rqpair->cmds = nvme_rdma_calloc(rqpair->num_entries, sizeof(*rqpair->cmds));
913 	if (!rqpair->cmds) {
914 		SPDK_ERRLOG("Failed to allocate RDMA cmds\n");
915 		goto fail;
916 	}
917 
918 
919 	TAILQ_INIT(&rqpair->free_reqs);
920 	TAILQ_INIT(&rqpair->outstanding_reqs);
921 	for (i = 0; i < rqpair->num_entries; i++) {
922 		struct spdk_nvme_rdma_req	*rdma_req;
923 		struct spdk_nvmf_cmd		*cmd;
924 
925 		rdma_req = &rqpair->rdma_reqs[i];
926 		rdma_req->rdma_wr.type = RDMA_WR_TYPE_SEND;
927 		cmd = &rqpair->cmds[i];
928 
929 		rdma_req->id = i;
930 
931 		/* The first RDMA sgl element will always point
932 		 * at this data structure. Depending on whether
933 		 * an NVMe-oF SGL is required, the length of
934 		 * this element may change. */
935 		rdma_req->send_sgl[0].addr = (uint64_t)cmd;
936 		rdma_req->send_wr.wr_id = (uint64_t)&rdma_req->rdma_wr;
937 		rdma_req->send_wr.next = NULL;
938 		rdma_req->send_wr.opcode = IBV_WR_SEND;
939 		rdma_req->send_wr.send_flags = IBV_SEND_SIGNALED;
940 		rdma_req->send_wr.sg_list = rdma_req->send_sgl;
941 		rdma_req->send_wr.imm_data = 0;
942 
943 		TAILQ_INSERT_TAIL(&rqpair->free_reqs, rdma_req, link);
944 	}
945 
946 	return 0;
947 fail:
948 	nvme_rdma_free_reqs(rqpair);
949 	return -ENOMEM;
950 }
951 
952 static int
953 nvme_rdma_register_reqs(struct nvme_rdma_qpair *rqpair)
954 {
955 	int i;
956 	int rc;
957 	uint32_t lkey;
958 
959 	rc = nvme_rdma_reg_mr(rqpair->cm_id, &rqpair->cmd_mr,
960 			      rqpair->cmds, rqpair->num_entries * sizeof(*rqpair->cmds));
961 
962 	if (rc < 0) {
963 		goto fail;
964 	}
965 
966 	lkey = nvme_rdma_mr_get_lkey(&rqpair->cmd_mr);
967 
968 	for (i = 0; i < rqpair->num_entries; i++) {
969 		rqpair->rdma_reqs[i].send_sgl[0].lkey = lkey;
970 	}
971 
972 	return 0;
973 
974 fail:
975 	nvme_rdma_unregister_reqs(rqpair);
976 	return -ENOMEM;
977 }
978 
979 static int
980 nvme_rdma_resolve_addr(struct nvme_rdma_qpair *rqpair,
981 		       struct sockaddr *src_addr,
982 		       struct sockaddr *dst_addr,
983 		       struct rdma_event_channel *cm_channel)
984 {
985 	int ret;
986 
987 	ret = rdma_resolve_addr(rqpair->cm_id, src_addr, dst_addr,
988 				NVME_RDMA_TIME_OUT_IN_MS);
989 	if (ret) {
990 		SPDK_ERRLOG("rdma_resolve_addr() failed, errno %d\n", errno);
991 		return ret;
992 	}
993 
994 	ret = nvme_rdma_process_event(rqpair, cm_channel, RDMA_CM_EVENT_ADDR_RESOLVED);
995 	if (ret) {
996 		SPDK_ERRLOG("RDMA address resolution error\n");
997 		return -1;
998 	}
999 
1000 	if (rqpair->qpair.ctrlr->opts.transport_ack_timeout != SPDK_NVME_TRANSPORT_ACK_TIMEOUT_DISABLED) {
1001 #ifdef SPDK_CONFIG_RDMA_SET_ACK_TIMEOUT
1002 		uint8_t timeout = rqpair->qpair.ctrlr->opts.transport_ack_timeout;
1003 		ret = rdma_set_option(rqpair->cm_id, RDMA_OPTION_ID,
1004 				      RDMA_OPTION_ID_ACK_TIMEOUT,
1005 				      &timeout, sizeof(timeout));
1006 		if (ret) {
1007 			SPDK_NOTICELOG("Can't apply RDMA_OPTION_ID_ACK_TIMEOUT %d, ret %d\n", timeout, ret);
1008 		}
1009 #else
1010 		SPDK_DEBUGLOG(nvme, "transport_ack_timeout is not supported\n");
1011 #endif
1012 	}
1013 
1014 
1015 	ret = rdma_resolve_route(rqpair->cm_id, NVME_RDMA_TIME_OUT_IN_MS);
1016 	if (ret) {
1017 		SPDK_ERRLOG("rdma_resolve_route() failed, errno %d\n", errno);
1018 		return ret;
1019 	}
1020 
1021 	ret = nvme_rdma_process_event(rqpair, cm_channel, RDMA_CM_EVENT_ROUTE_RESOLVED);
1022 	if (ret) {
1023 		SPDK_ERRLOG("RDMA route resolution error\n");
1024 		return -1;
1025 	}
1026 
1027 	return 0;
1028 }
1029 
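/*
 * Issue the RDMA connection request: size responder_resources from the device
 * attributes, fill in the NVMe-oF RDMA private data (qid, hrqsize, hsqsize,
 * cntlid), call rdma_connect() and wait for the ESTABLISHED event. Returns
 * -EAGAIN when the target reports a stale connection.
 */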
1030 static int
1031 nvme_rdma_connect(struct nvme_rdma_qpair *rqpair)
1032 {
1033 	struct rdma_conn_param				param = {};
1034 	struct spdk_nvmf_rdma_request_private_data	request_data = {};
1035 	struct ibv_device_attr				attr;
1036 	int						ret;
1037 	struct spdk_nvme_ctrlr				*ctrlr;
1038 	struct nvme_rdma_ctrlr				*rctrlr;
1039 
1040 	ret = ibv_query_device(rqpair->cm_id->verbs, &attr);
1041 	if (ret != 0) {
1042 		SPDK_ERRLOG("Failed to query RDMA device attributes.\n");
1043 		return ret;
1044 	}
1045 
1046 	param.responder_resources = spdk_min(rqpair->num_entries, attr.max_qp_rd_atom);
1047 
1048 	ctrlr = rqpair->qpair.ctrlr;
1049 	if (!ctrlr) {
1050 		return -1;
1051 	}
1052 	rctrlr = nvme_rdma_ctrlr(ctrlr);
1053 	assert(rctrlr != NULL);
1054 
1055 	request_data.qid = rqpair->qpair.id;
1056 	request_data.hrqsize = rqpair->num_entries;
1057 	request_data.hsqsize = rqpair->num_entries - 1;
1058 	request_data.cntlid = ctrlr->cntlid;
1059 
1060 	param.private_data = &request_data;
1061 	param.private_data_len = sizeof(request_data);
1062 	param.retry_count = ctrlr->opts.transport_retry_count;
1063 	param.rnr_retry_count = 7;
1064 
1065 	/* Fields below are ignored by rdma cm if qpair has been
1066 	 * created using rdma cm API. */
1067 	param.srq = 0;
1068 	param.qp_num = rqpair->rdma_qp->qp->qp_num;
1069 
1070 	ret = rdma_connect(rqpair->cm_id, &param);
1071 	if (ret) {
1072 		SPDK_ERRLOG("nvme rdma connect error\n");
1073 		return ret;
1074 	}
1075 
1076 	ret = nvme_rdma_process_event(rqpair, rctrlr->cm_channel, RDMA_CM_EVENT_ESTABLISHED);
1077 	if (ret == -ESTALE) {
1078 		SPDK_NOTICELOG("Received a stale connection notice during connection.\n");
1079 		return -EAGAIN;
1080 	} else if (ret) {
1081 		SPDK_ERRLOG("RDMA connect error %d\n", ret);
1082 		return ret;
1083 	} else {
1084 		return 0;
1085 	}
1086 }
1087 
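/* Resolve addr/service into a sockaddr via getaddrinfo() and copy the first result into *sa. */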
1088 static int
1089 nvme_rdma_parse_addr(struct sockaddr_storage *sa, int family, const char *addr, const char *service)
1090 {
1091 	struct addrinfo *res;
1092 	struct addrinfo hints;
1093 	int ret;
1094 
1095 	memset(&hints, 0, sizeof(hints));
1096 	hints.ai_family = family;
1097 	hints.ai_socktype = SOCK_STREAM;
1098 	hints.ai_protocol = 0;
1099 
1100 	ret = getaddrinfo(addr, service, &hints, &res);
1101 	if (ret) {
1102 		SPDK_ERRLOG("getaddrinfo failed: %s (%d)\n", gai_strerror(ret), ret);
1103 		return ret;
1104 	}
1105 
1106 	if (res->ai_addrlen > sizeof(*sa)) {
1107 		SPDK_ERRLOG("getaddrinfo() ai_addrlen %zu too large\n", (size_t)res->ai_addrlen);
1108 		ret = EINVAL;
1109 	} else {
1110 		memcpy(sa, res->ai_addr, res->ai_addrlen);
1111 	}
1112 
1113 	freeaddrinfo(res);
1114 	return ret;
1115 }
1116 
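/*
 * Full qpair connection sequence: resolve the destination (and optional
 * source) address and route, create the RDMA queue pair, perform the RDMA
 * connect, register command/response buffers and the memory translation map,
 * and finally send the NVMe-oF Fabrics CONNECT command.
 */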
1117 static int
1118 _nvme_rdma_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
1119 {
1120 	struct sockaddr_storage dst_addr;
1121 	struct sockaddr_storage src_addr;
1122 	bool src_addr_specified;
1123 	int rc;
1124 	struct nvme_rdma_ctrlr *rctrlr;
1125 	struct nvme_rdma_qpair *rqpair;
1126 	int family;
1127 
1128 	rqpair = nvme_rdma_qpair(qpair);
1129 	rctrlr = nvme_rdma_ctrlr(ctrlr);
1130 	assert(rctrlr != NULL);
1131 
1132 	switch (ctrlr->trid.adrfam) {
1133 	case SPDK_NVMF_ADRFAM_IPV4:
1134 		family = AF_INET;
1135 		break;
1136 	case SPDK_NVMF_ADRFAM_IPV6:
1137 		family = AF_INET6;
1138 		break;
1139 	default:
1140 		SPDK_ERRLOG("Unhandled ADRFAM %d\n", ctrlr->trid.adrfam);
1141 		return -1;
1142 	}
1143 
1144 	SPDK_DEBUGLOG(nvme, "adrfam %d ai_family %d\n", ctrlr->trid.adrfam, family);
1145 
1146 	memset(&dst_addr, 0, sizeof(dst_addr));
1147 
1148 	SPDK_DEBUGLOG(nvme, "trsvcid is %s\n", ctrlr->trid.trsvcid);
1149 	rc = nvme_rdma_parse_addr(&dst_addr, family, ctrlr->trid.traddr, ctrlr->trid.trsvcid);
1150 	if (rc != 0) {
1151 		SPDK_ERRLOG("dst_addr nvme_rdma_parse_addr() failed\n");
1152 		return -1;
1153 	}
1154 
1155 	if (ctrlr->opts.src_addr[0] || ctrlr->opts.src_svcid[0]) {
1156 		memset(&src_addr, 0, sizeof(src_addr));
1157 		rc = nvme_rdma_parse_addr(&src_addr, family, ctrlr->opts.src_addr, ctrlr->opts.src_svcid);
1158 		if (rc != 0) {
1159 			SPDK_ERRLOG("src_addr nvme_rdma_parse_addr() failed\n");
1160 			return -1;
1161 		}
1162 		src_addr_specified = true;
1163 	} else {
1164 		src_addr_specified = false;
1165 	}
1166 
1167 	rc = rdma_create_id(rctrlr->cm_channel, &rqpair->cm_id, rqpair, RDMA_PS_TCP);
1168 	if (rc < 0) {
1169 		SPDK_ERRLOG("rdma_create_id() failed\n");
1170 		return -1;
1171 	}
1172 
1173 	rc = nvme_rdma_resolve_addr(rqpair,
1174 				    src_addr_specified ? (struct sockaddr *)&src_addr : NULL,
1175 				    (struct sockaddr *)&dst_addr, rctrlr->cm_channel);
1176 	if (rc < 0) {
1177 		SPDK_ERRLOG("nvme_rdma_resolve_addr() failed\n");
1178 		return -1;
1179 	}
1180 
1181 	rc = nvme_rdma_qpair_init(rqpair);
1182 	if (rc < 0) {
1183 		SPDK_ERRLOG("nvme_rdma_qpair_init() failed\n");
1184 		return -1;
1185 	}
1186 
1187 	rc = nvme_rdma_connect(rqpair);
1188 	if (rc != 0) {
1189 		SPDK_ERRLOG("Unable to connect the rqpair\n");
1190 		return rc;
1191 	}
1192 
1193 	rc = nvme_rdma_register_reqs(rqpair);
1194 	SPDK_DEBUGLOG(nvme, "rc = %d\n", rc);
1195 	if (rc) {
1196 		SPDK_ERRLOG("Unable to register rqpair RDMA requests\n");
1197 		return -1;
1198 	}
1199 	SPDK_DEBUGLOG(nvme, "RDMA requests registered\n");
1200 
1201 	rc = nvme_rdma_register_rsps(rqpair);
1202 	SPDK_DEBUGLOG(nvme, "rc = %d\n", rc);
1203 	if (rc < 0) {
1204 		SPDK_ERRLOG("Unable to register rqpair RDMA responses\n");
1205 		return -1;
1206 	}
1207 	SPDK_DEBUGLOG(nvme, "RDMA responses registered\n");
1208 
1209 	rqpair->mr_map = spdk_rdma_create_mem_map(rqpair->rdma_qp->qp->pd, &g_nvme_hooks);
1210 	if (!rqpair->mr_map) {
1211 		SPDK_ERRLOG("Unable to register RDMA memory translation map\n");
1212 		return -1;
1213 	}
1214 
1215 	rc = nvme_fabric_qpair_connect(&rqpair->qpair, rqpair->num_entries);
1216 	if (rc < 0) {
1217 		rqpair->qpair.transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
1218 		SPDK_ERRLOG("Failed to send an NVMe-oF Fabric CONNECT command\n");
1219 		return rc;
1220 	}
1221 
1222 	return 0;
1223 }
1224 
1225 static int
1226 nvme_rdma_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
1227 {
1228 	int rc;
1229 	int retry_count = 0;
1230 
1231 	rc = _nvme_rdma_ctrlr_connect_qpair(ctrlr, qpair);
1232 
1233 	/*
1234 	 * -EAGAIN represents the special case where the target side still thought it was connected.
1235 	 * Most NICs will fail the first connection attempt, and the NICs will clean up whatever
1236 	 * state they need to. After that, subsequent connection attempts will succeed.
1237 	 */
1238 	if (rc == -EAGAIN) {
1239 		SPDK_NOTICELOG("Detected stale connection on Target side for qpid: %d\n", qpair->id);
1240 		do {
1241 			nvme_delay(NVME_RDMA_STALE_CONN_RETRY_DELAY_US);
1242 			nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
1243 			rc = _nvme_rdma_ctrlr_connect_qpair(ctrlr, qpair);
1244 			retry_count++;
1245 		} while (rc == -EAGAIN && retry_count < NVME_RDMA_STALE_CONN_RETRY_MAX);
1246 	}
1247 
1248 	return rc;
1249 }
1250 
1251 /*
1252  * Build SGL describing empty payload.
1253  */
1254 static int
1255 nvme_rdma_build_null_request(struct spdk_nvme_rdma_req *rdma_req)
1256 {
1257 	struct nvme_request *req = rdma_req->req;
1258 
1259 	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
1260 
1261 	/* The first element of this SGL is pointing at an
1262 	 * spdk_nvmf_cmd object. For this particular command,
1263 	 * we only need the first 64 bytes corresponding to
1264 	 * the NVMe command. */
1265 	rdma_req->send_sgl[0].length = sizeof(struct spdk_nvme_cmd);
1266 
1267 	/* The RDMA SGL needs one element describing the NVMe command. */
1268 	rdma_req->send_wr.num_sge = 1;
1269 
1270 	req->cmd.dptr.sgl1.keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
1271 	req->cmd.dptr.sgl1.keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
1272 	req->cmd.dptr.sgl1.keyed.length = 0;
1273 	req->cmd.dptr.sgl1.keyed.key = 0;
1274 	req->cmd.dptr.sgl1.address = 0;
1275 
1276 	return 0;
1277 }
1278 
1279 /*
1280  * Build inline SGL describing contiguous payload buffer.
1281  */
1282 static int
1283 nvme_rdma_build_contig_inline_request(struct nvme_rdma_qpair *rqpair,
1284 				      struct spdk_nvme_rdma_req *rdma_req)
1285 {
1286 	struct nvme_request *req = rdma_req->req;
1287 	struct spdk_rdma_memory_translation mem_translation;
1288 	void *payload;
1289 	int rc;
1290 
1291 	payload = req->payload.contig_or_cb_arg + req->payload_offset;
1292 	assert(req->payload_size != 0);
1293 	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
1294 
1295 	rc = spdk_rdma_get_translation(rqpair->mr_map, payload, req->payload_size, &mem_translation);
1296 	if (spdk_unlikely(rc)) {
1297 		SPDK_ERRLOG("Memory translation failed, rc %d\n", rc);
1298 		return -1;
1299 	}
1300 
1301 	rdma_req->send_sgl[1].lkey = spdk_rdma_memory_translation_get_lkey(&mem_translation);
1302 
1303 	/* The first element of this SGL is pointing at an
1304 	 * spdk_nvmf_cmd object. For this particular command,
1305 	 * we only need the first 64 bytes corresponding to
1306 	 * the NVMe command. */
1307 	rdma_req->send_sgl[0].length = sizeof(struct spdk_nvme_cmd);
1308 
1309 	rdma_req->send_sgl[1].addr = (uint64_t)payload;
1310 	rdma_req->send_sgl[1].length = (uint32_t)req->payload_size;
1311 
1312 	/* The RDMA SGL contains two elements. The first describes
1313 	 * the NVMe command and the second describes the data
1314 	 * payload. */
1315 	rdma_req->send_wr.num_sge = 2;
1316 
1317 	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
1318 	req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
1319 	req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
1320 	req->cmd.dptr.sgl1.unkeyed.length = (uint32_t)req->payload_size;
1321 	/* Inline only supported for icdoff == 0 currently.  This function will
1322 	 * not get called for controllers with other values. */
1323 	req->cmd.dptr.sgl1.address = (uint64_t)0;
1324 
1325 	return 0;
1326 }
1327 
1328 /*
1329  * Build SGL describing contiguous payload buffer.
1330  */
1331 static int
1332 nvme_rdma_build_contig_request(struct nvme_rdma_qpair *rqpair,
1333 			       struct spdk_nvme_rdma_req *rdma_req)
1334 {
1335 	struct nvme_request *req = rdma_req->req;
1336 	void *payload = req->payload.contig_or_cb_arg + req->payload_offset;
1337 	struct spdk_rdma_memory_translation mem_translation;
1338 	int rc;
1339 
1340 	assert(req->payload_size != 0);
1341 	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
1342 
1343 	if (spdk_unlikely(req->payload_size > NVME_RDMA_MAX_KEYED_SGL_LENGTH)) {
1344 		SPDK_ERRLOG("SGL length %u exceeds max keyed SGL block size %u\n",
1345 			    req->payload_size, NVME_RDMA_MAX_KEYED_SGL_LENGTH);
1346 		return -1;
1347 	}
1348 
1349 	rc = spdk_rdma_get_translation(rqpair->mr_map, payload, req->payload_size, &mem_translation);
1350 	if (spdk_unlikely(rc)) {
1351 		SPDK_ERRLOG("Memory translation failed, rc %d\n", rc);
1352 		return -1;
1353 	}
1354 
1355 	req->cmd.dptr.sgl1.keyed.key = spdk_rdma_memory_translation_get_rkey(&mem_translation);
1356 
1357 	/* The first element of this SGL is pointing at an
1358 	 * spdk_nvmf_cmd object. For this particular command,
1359 	 * we only need the first 64 bytes corresponding to
1360 	 * the NVMe command. */
1361 	rdma_req->send_sgl[0].length = sizeof(struct spdk_nvme_cmd);
1362 
1363 	/* The RDMA SGL needs one element describing the NVMe command. */
1364 	rdma_req->send_wr.num_sge = 1;
1365 
1366 	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
1367 	req->cmd.dptr.sgl1.keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
1368 	req->cmd.dptr.sgl1.keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
1369 	req->cmd.dptr.sgl1.keyed.length = req->payload_size;
1370 	req->cmd.dptr.sgl1.address = (uint64_t)payload;
1371 
1372 	return 0;
1373 }
1374 
1375 /*
1376  * Build SGL describing scattered payload buffer.
1377  */
1378 static int
1379 nvme_rdma_build_sgl_request(struct nvme_rdma_qpair *rqpair,
1380 			    struct spdk_nvme_rdma_req *rdma_req)
1381 {
1382 	struct nvme_request *req = rdma_req->req;
1383 	struct spdk_nvmf_cmd *cmd = &rqpair->cmds[rdma_req->id];
1384 	struct spdk_rdma_memory_translation mem_translation;
1385 	void *virt_addr;
1386 	uint32_t remaining_size;
1387 	uint32_t sge_length;
1388 	int rc, max_num_sgl, num_sgl_desc;
1389 
1390 	assert(req->payload_size != 0);
1391 	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL);
1392 	assert(req->payload.reset_sgl_fn != NULL);
1393 	assert(req->payload.next_sge_fn != NULL);
1394 	req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);
1395 
1396 	max_num_sgl = req->qpair->ctrlr->max_sges;
1397 
1398 	remaining_size = req->payload_size;
1399 	num_sgl_desc = 0;
1400 	do {
1401 		rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &sge_length);
1402 		if (rc) {
1403 			return -1;
1404 		}
1405 
1406 		sge_length = spdk_min(remaining_size, sge_length);
1407 
1408 		if (spdk_unlikely(sge_length > NVME_RDMA_MAX_KEYED_SGL_LENGTH)) {
1409 			SPDK_ERRLOG("SGL length %u exceeds max keyed SGL block size %u\n",
1410 				    sge_length, NVME_RDMA_MAX_KEYED_SGL_LENGTH);
1411 			return -1;
1412 		}
1413 		rc = spdk_rdma_get_translation(rqpair->mr_map, virt_addr, sge_length, &mem_translation);
1414 		if (spdk_unlikely(rc)) {
1415 			SPDK_ERRLOG("Memory translation failed, rc %d\n", rc);
1416 			return -1;
1417 		}
1418 
1419 		cmd->sgl[num_sgl_desc].keyed.key = spdk_rdma_memory_translation_get_rkey(&mem_translation);
1420 		cmd->sgl[num_sgl_desc].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
1421 		cmd->sgl[num_sgl_desc].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
1422 		cmd->sgl[num_sgl_desc].keyed.length = sge_length;
1423 		cmd->sgl[num_sgl_desc].address = (uint64_t)virt_addr;
1424 
1425 		remaining_size -= sge_length;
1426 		num_sgl_desc++;
1427 	} while (remaining_size > 0 && num_sgl_desc < max_num_sgl);
1428 
1429 
1430 	/* Should be impossible if we did our sgl checks properly up the stack, but do a sanity check here. */
1431 	if (remaining_size > 0) {
1432 		return -1;
1433 	}
1434 
1435 	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
1436 
1437 	/* The RDMA SGL needs one element describing some portion
1438 	 * of the spdk_nvmf_cmd structure. */
1439 	rdma_req->send_wr.num_sge = 1;
1440 
1441 	/*
1442 	 * If only one SGL descriptor is required, it can be embedded directly in the command
1443 	 * as a data block descriptor.
1444 	 */
1445 	if (num_sgl_desc == 1) {
1446 		/* The first element of this SGL is pointing at an
1447 		 * spdk_nvmf_cmd object. For this particular command,
1448 		 * we only need the first 64 bytes corresponding to
1449 		 * the NVMe command. */
1450 		rdma_req->send_sgl[0].length = sizeof(struct spdk_nvme_cmd);
1451 
1452 		req->cmd.dptr.sgl1.keyed.type = cmd->sgl[0].keyed.type;
1453 		req->cmd.dptr.sgl1.keyed.subtype = cmd->sgl[0].keyed.subtype;
1454 		req->cmd.dptr.sgl1.keyed.length = cmd->sgl[0].keyed.length;
1455 		req->cmd.dptr.sgl1.keyed.key = cmd->sgl[0].keyed.key;
1456 		req->cmd.dptr.sgl1.address = cmd->sgl[0].address;
1457 	} else {
1458 		/*
1459 		 * Otherwise, the SGL descriptor embedded in the command must point to the list of
1460 		 * SGL descriptors used to describe the operation. In that case it is a last segment descriptor.
1461 		 */
1462 		uint32_t descriptors_size = sizeof(struct spdk_nvme_sgl_descriptor) * num_sgl_desc;
1463 
1464 		if (spdk_unlikely(descriptors_size > rqpair->qpair.ctrlr->ioccsz_bytes)) {
1465 			SPDK_ERRLOG("Size of SGL descriptors (%u) exceeds ICD (%u)\n",
1466 				    descriptors_size, rqpair->qpair.ctrlr->ioccsz_bytes);
1467 			return -1;
1468 		}
1469 		rdma_req->send_sgl[0].length = sizeof(struct spdk_nvme_cmd) + descriptors_size;
1470 
1471 		req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
1472 		req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
1473 		req->cmd.dptr.sgl1.unkeyed.length = descriptors_size;
1474 		req->cmd.dptr.sgl1.address = (uint64_t)0;
1475 	}
1476 
1477 	return 0;
1478 }
1479 
1480 /*
1481  * Build inline SGL describing sgl payload buffer.
1482  */
1483 static int
1484 nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
1485 				   struct spdk_nvme_rdma_req *rdma_req)
1486 {
1487 	struct nvme_request *req = rdma_req->req;
1488 	struct spdk_rdma_memory_translation mem_translation;
1489 	uint32_t length;
1490 	void *virt_addr;
1491 	int rc;
1492 
1493 	assert(req->payload_size != 0);
1494 	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL);
1495 	assert(req->payload.reset_sgl_fn != NULL);
1496 	assert(req->payload.next_sge_fn != NULL);
1497 	req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);
1498 
1499 	rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &length);
1500 	if (rc) {
1501 		return -1;
1502 	}
1503 
1504 	if (length < req->payload_size) {
1505 		SPDK_DEBUGLOG(nvme, "Inline SGL payload spans multiple SGEs; falling back to a non-inline SGL request.\n");
1506 		return nvme_rdma_build_sgl_request(rqpair, rdma_req);
1507 	}
1508 
1509 	if (length > req->payload_size) {
1510 		length = req->payload_size;
1511 	}
1512 
1513 	rc = spdk_rdma_get_translation(rqpair->mr_map, virt_addr, length, &mem_translation);
1514 	if (spdk_unlikely(rc)) {
1515 		SPDK_ERRLOG("Memory translation failed, rc %d\n", rc);
1516 		return -1;
1517 	}
1518 
1519 	rdma_req->send_sgl[1].addr = (uint64_t)virt_addr;
1520 	rdma_req->send_sgl[1].length = length;
1521 	rdma_req->send_sgl[1].lkey = spdk_rdma_memory_translation_get_lkey(&mem_translation);
1522 
1523 	rdma_req->send_wr.num_sge = 2;
1524 
1525 	/* The first element of this SGL is pointing at an
1526 	 * spdk_nvmf_cmd object. For this particular command,
1527 	 * we only need the first 64 bytes corresponding to
1528 	 * the NVMe command. */
1529 	rdma_req->send_sgl[0].length = sizeof(struct spdk_nvme_cmd);
1530 
1531 	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
1532 	req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
1533 	req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
1534 	req->cmd.dptr.sgl1.unkeyed.length = (uint32_t)req->payload_size;
1535 	/* Inline only supported for icdoff == 0 currently.  This function will
1536 	 * not get called for controllers with other values. */
1537 	req->cmd.dptr.sgl1.address = (uint64_t)0;
1538 
1539 	return 0;
1540 }
1541 
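/*
 * Prepare an rdma_req for submission: pick the SGL building strategy based on
 * the payload type and whether in-capsule data can be used (host-to-controller
 * transfers that fit in ioccsz on targets with icdoff == 0), then copy the
 * NVMe command into the registered command buffer.
 */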
1542 static int
1543 nvme_rdma_req_init(struct nvme_rdma_qpair *rqpair, struct nvme_request *req,
1544 		   struct spdk_nvme_rdma_req *rdma_req)
1545 {
1546 	struct spdk_nvme_ctrlr *ctrlr = rqpair->qpair.ctrlr;
1547 	enum nvme_payload_type payload_type;
1548 	bool icd_supported;
1549 	int rc;
1550 
1551 	assert(rdma_req->req == NULL);
1552 	rdma_req->req = req;
1553 	req->cmd.cid = rdma_req->id;
1554 	payload_type = nvme_payload_type(&req->payload);
1555 	/*
1556 	 * Check if icdoff is non zero, to avoid interop conflicts with
1557 	 * targets with non-zero icdoff.  Both SPDK and the Linux kernel
1558 	 * targets use icdoff = 0.  For targets with non-zero icdoff, we
1559 	 * will currently just not use inline data for now.
1560 	 */
1561 	icd_supported = spdk_nvme_opc_get_data_transfer(req->cmd.opc) == SPDK_NVME_DATA_HOST_TO_CONTROLLER
1562 			&& req->payload_size <= ctrlr->ioccsz_bytes && ctrlr->icdoff == 0;
1563 
1564 	if (req->payload_size == 0) {
1565 		rc = nvme_rdma_build_null_request(rdma_req);
1566 	} else if (payload_type == NVME_PAYLOAD_TYPE_CONTIG) {
1567 		if (icd_supported) {
1568 			rc = nvme_rdma_build_contig_inline_request(rqpair, rdma_req);
1569 		} else {
1570 			rc = nvme_rdma_build_contig_request(rqpair, rdma_req);
1571 		}
1572 	} else if (payload_type == NVME_PAYLOAD_TYPE_SGL) {
1573 		if (icd_supported) {
1574 			rc = nvme_rdma_build_sgl_inline_request(rqpair, rdma_req);
1575 		} else {
1576 			rc = nvme_rdma_build_sgl_request(rqpair, rdma_req);
1577 		}
1578 	} else {
1579 		rc = -1;
1580 	}
1581 
1582 	if (rc) {
1583 		rdma_req->req = NULL;
1584 		return rc;
1585 	}
1586 
1587 	memcpy(&rqpair->cmds[rdma_req->id], &req->cmd, sizeof(req->cmd));
1588 	return 0;
1589 }
1590 
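/*
 * Allocate and initialize an nvme_rdma_qpair along with its request and
 * response resources. The RDMA connection itself is established later via
 * nvme_rdma_ctrlr_connect_qpair().
 */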
1591 static struct spdk_nvme_qpair *
1592 nvme_rdma_ctrlr_create_qpair(struct spdk_nvme_ctrlr *ctrlr,
1593 			     uint16_t qid, uint32_t qsize,
1594 			     enum spdk_nvme_qprio qprio,
1595 			     uint32_t num_requests,
1596 			     bool delay_cmd_submit)
1597 {
1598 	struct nvme_rdma_qpair *rqpair;
1599 	struct spdk_nvme_qpair *qpair;
1600 	int rc;
1601 
1602 	rqpair = nvme_rdma_calloc(1, sizeof(struct nvme_rdma_qpair));
1603 	if (!rqpair) {
1604 		SPDK_ERRLOG("failed to allocate rqpair\n");
1605 		return NULL;
1606 	}
1607 
1608 	rqpair->num_entries = qsize;
1609 	rqpair->delay_cmd_submit = delay_cmd_submit;
1610 	qpair = &rqpair->qpair;
1611 	rc = nvme_qpair_init(qpair, qid, ctrlr, qprio, num_requests);
1612 	if (rc != 0) {
1613 		nvme_rdma_free(rqpair);
1614 		return NULL;
1615 	}
1616 
1617 	rc = nvme_rdma_alloc_reqs(rqpair);
1618 	SPDK_DEBUGLOG(nvme, "rc = %d\n", rc);
1619 	if (rc) {
1620 		SPDK_ERRLOG("Unable to allocate rqpair RDMA requests\n");
1621 		nvme_rdma_free(rqpair);
1622 		return NULL;
1623 	}
1624 	SPDK_DEBUGLOG(nvme, "RDMA requests allocated\n");
1625 
1626 	rc = nvme_rdma_alloc_rsps(rqpair);
1627 	SPDK_DEBUGLOG(nvme, "rc = %d\n", rc);
1628 	if (rc < 0) {
1629 		SPDK_ERRLOG("Unable to allocate rqpair RDMA responses\n");
1630 		nvme_rdma_free_reqs(rqpair);
1631 		nvme_rdma_free(rqpair);
1632 		return NULL;
1633 	}
1634 	SPDK_DEBUGLOG(nvme, "RDMA responses allocated\n");
1635 
1636 	return qpair;
1637 }
1638 
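/*
 * Tear down the qpair's RDMA state: release the memory map and registered
 * buffers, drop any CM events still queued for this qpair, disconnect and
 * destroy the RDMA queue pair, CM id and completion queue.
 */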
1639 static void
1640 nvme_rdma_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
1641 {
1642 	struct nvme_rdma_qpair *rqpair = nvme_rdma_qpair(qpair);
1643 	struct nvme_rdma_ctrlr *rctrlr = NULL;
1644 	struct nvme_rdma_cm_event_entry *entry, *tmp;
1645 	int rc;
1646 
1647 	spdk_rdma_free_mem_map(&rqpair->mr_map);
1648 	nvme_rdma_unregister_reqs(rqpair);
1649 	nvme_rdma_unregister_rsps(rqpair);
1650 
1651 	if (rqpair->evt) {
1652 		rdma_ack_cm_event(rqpair->evt);
1653 		rqpair->evt = NULL;
1654 	}
1655 
1656 	/*
1657 	 * This works because we have the controller lock both in
1658 	 * this function and in the function where we add new events.
1659 	 */
1660 	if (qpair->ctrlr != NULL) {
1661 		rctrlr = nvme_rdma_ctrlr(qpair->ctrlr);
1662 		STAILQ_FOREACH_SAFE(entry, &rctrlr->pending_cm_events, link, tmp) {
1663 			if (nvme_rdma_qpair(entry->evt->id->context) == rqpair) {
1664 				STAILQ_REMOVE(&rctrlr->pending_cm_events, entry, nvme_rdma_cm_event_entry, link);
1665 				rdma_ack_cm_event(entry->evt);
1666 				STAILQ_INSERT_HEAD(&rctrlr->free_cm_events, entry, link);
1667 			}
1668 		}
1669 	}
1670 
1671 	if (rqpair->cm_id) {
1672 		if (rqpair->rdma_qp) {
1673 			rc = spdk_rdma_qp_disconnect(rqpair->rdma_qp);
1674 			if ((rctrlr != NULL) && (rc == 0)) {
1675 				if (nvme_rdma_process_event(rqpair, rctrlr->cm_channel, RDMA_CM_EVENT_DISCONNECTED)) {
1676 					SPDK_DEBUGLOG(nvme, "Target did not respond to qpair disconnect.\n");
1677 				}
1678 			}
1679 			spdk_rdma_qp_destroy(rqpair->rdma_qp);
1680 			rqpair->rdma_qp = NULL;
1681 		}
1682 
1683 		rdma_destroy_id(rqpair->cm_id);
1684 		rqpair->cm_id = NULL;
1685 	}
1686 
1687 	if (rqpair->cq) {
1688 		ibv_destroy_cq(rqpair->cq);
1689 		rqpair->cq = NULL;
1690 	}
1691 }
1692 
1693 static void nvme_rdma_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr);
1694 
1695 static int
1696 nvme_rdma_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
1697 {
1698 	struct nvme_rdma_qpair *rqpair;
1699 
1700 	assert(qpair != NULL);
1701 	rqpair = nvme_rdma_qpair(qpair);
1702 	nvme_transport_ctrlr_disconnect_qpair(ctrlr, qpair);
1703 	if (rqpair->defer_deletion_to_pg) {
1704 		nvme_qpair_set_state(qpair, NVME_QPAIR_DESTROYING);
1705 		return 0;
1706 	}
1707 
1708 	nvme_rdma_qpair_abort_reqs(qpair, 1);
1709 	nvme_qpair_deinit(qpair);
1710 
1711 	nvme_rdma_free_reqs(rqpair);
1712 	nvme_rdma_free_rsps(rqpair);
1713 	nvme_rdma_free(rqpair);
1714 
1715 	return 0;
1716 }
1717 
1718 static struct spdk_nvme_qpair *
1719 nvme_rdma_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
1720 				const struct spdk_nvme_io_qpair_opts *opts)
1721 {
1722 	return nvme_rdma_ctrlr_create_qpair(ctrlr, qid, opts->io_queue_size, opts->qprio,
1723 					    opts->io_queue_requests,
1724 					    opts->delay_cmd_submit);
1725 }
1726 
1727 static int
1728 nvme_rdma_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
1729 {
1730 	/* do nothing here */
1731 	return 0;
1732 }
1733 
1734 static int nvme_rdma_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr);
1735 
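/*
 * Allocate and initialize an RDMA controller: clamp transport_retry_count and
 * transport_ack_timeout to their maximums, compute max_sge across all RDMA
 * devices, set up a non-blocking CM event channel with a pool of event
 * entries, and create the admin qpair.
 */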
1736 static struct spdk_nvme_ctrlr *nvme_rdma_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
1737 		const struct spdk_nvme_ctrlr_opts *opts,
1738 		void *devhandle)
1739 {
1740 	struct nvme_rdma_ctrlr *rctrlr;
1741 	struct ibv_context **contexts;
1742 	struct ibv_device_attr dev_attr;
1743 	int i, flag, rc;
1744 
1745 	rctrlr = nvme_rdma_calloc(1, sizeof(struct nvme_rdma_ctrlr));
1746 	if (rctrlr == NULL) {
1747 		SPDK_ERRLOG("could not allocate ctrlr\n");
1748 		return NULL;
1749 	}
1750 
1751 	rctrlr->ctrlr.opts = *opts;
1752 	rctrlr->ctrlr.trid = *trid;
1753 
1754 	if (opts->transport_retry_count > NVME_RDMA_CTRLR_MAX_TRANSPORT_RETRY_COUNT) {
1755 		SPDK_NOTICELOG("transport_retry_count exceeds max value %d, use max value\n",
1756 			       NVME_RDMA_CTRLR_MAX_TRANSPORT_RETRY_COUNT);
1757 		rctrlr->ctrlr.opts.transport_retry_count = NVME_RDMA_CTRLR_MAX_TRANSPORT_RETRY_COUNT;
1758 	}
1759 
1760 	if (opts->transport_ack_timeout > NVME_RDMA_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT) {
1761 		SPDK_NOTICELOG("transport_ack_timeout exceeds max value %d, using max value\n",
1762 			       NVME_RDMA_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT);
1763 		rctrlr->ctrlr.opts.transport_ack_timeout = NVME_RDMA_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT;
1764 	}
1765 
1766 	contexts = rdma_get_devices(NULL);
1767 	if (contexts == NULL) {
1768 		SPDK_ERRLOG("rdma_get_devices() failed: %s (%d)\n", spdk_strerror(errno), errno);
1769 		nvme_rdma_free(rctrlr);
1770 		return NULL;
1771 	}
1772 
1773 	i = 0;
1774 	rctrlr->max_sge = NVME_RDMA_MAX_SGL_DESCRIPTORS;
1775 
1776 	while (contexts[i] != NULL) {
1777 		rc = ibv_query_device(contexts[i], &dev_attr);
1778 		if (rc != 0) {
1779 			SPDK_ERRLOG("Failed to query RDMA device attributes.\n");
1780 			rdma_free_devices(contexts);
1781 			nvme_rdma_free(rctrlr);
1782 			return NULL;
1783 		}
1784 		rctrlr->max_sge = spdk_min(rctrlr->max_sge, (uint16_t)dev_attr.max_sge);
1785 		i++;
1786 	}
1787 
1788 	rdma_free_devices(contexts);
1789 
1790 	rc = nvme_ctrlr_construct(&rctrlr->ctrlr);
1791 	if (rc != 0) {
1792 		nvme_rdma_free(rctrlr);
1793 		return NULL;
1794 	}
1795 
1796 	STAILQ_INIT(&rctrlr->pending_cm_events);
1797 	STAILQ_INIT(&rctrlr->free_cm_events);
1798 	rctrlr->cm_events = nvme_rdma_calloc(NVME_RDMA_NUM_CM_EVENTS, sizeof(*rctrlr->cm_events));
1799 	if (rctrlr->cm_events == NULL) {
1800 		SPDK_ERRLOG("unable to allocate buffers to hold CM events.\n");
1801 		goto destruct_ctrlr;
1802 	}
1803 
1804 	for (i = 0; i < NVME_RDMA_NUM_CM_EVENTS; i++) {
1805 		STAILQ_INSERT_TAIL(&rctrlr->free_cm_events, &rctrlr->cm_events[i], link);
1806 	}
1807 
1808 	rctrlr->cm_channel = rdma_create_event_channel();
1809 	if (rctrlr->cm_channel == NULL) {
1810 		SPDK_ERRLOG("rdma_create_event_channel() failed\n");
1811 		goto destruct_ctrlr;
1812 	}
1813 
1814 	flag = fcntl(rctrlr->cm_channel->fd, F_GETFL);
1815 	if (fcntl(rctrlr->cm_channel->fd, F_SETFL, flag | O_NONBLOCK) < 0) {
1816 		SPDK_ERRLOG("Cannot set event channel to non-blocking\n");
1817 		goto destruct_ctrlr;
1818 	}
1819 
1820 	rctrlr->ctrlr.adminq = nvme_rdma_ctrlr_create_qpair(&rctrlr->ctrlr, 0,
1821 			       rctrlr->ctrlr.opts.admin_queue_size, 0,
1822 			       rctrlr->ctrlr.opts.admin_queue_size, false);
1823 	if (!rctrlr->ctrlr.adminq) {
1824 		SPDK_ERRLOG("failed to create admin qpair\n");
1825 		goto destruct_ctrlr;
1826 	}
1827 
1828 	if (nvme_ctrlr_add_process(&rctrlr->ctrlr, 0) != 0) {
1829 		SPDK_ERRLOG("nvme_ctrlr_add_process() failed\n");
1830 		goto destruct_ctrlr;
1831 	}
1832 
1833 	SPDK_DEBUGLOG(nvme, "successfully initialized the nvmf ctrlr\n");
1834 	return &rctrlr->ctrlr;
1835 
1836 destruct_ctrlr:
1837 	nvme_ctrlr_destruct(&rctrlr->ctrlr);
1838 	return NULL;
1839 }
1840 
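/*
 * Destruct an RDMA controller: delete the admin qpair, ack any pending CM
 * events, release the CM event entries and event channel, then free the
 * controller.
 */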
1841 static int
1842 nvme_rdma_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
1843 {
1844 	struct nvme_rdma_ctrlr *rctrlr = nvme_rdma_ctrlr(ctrlr);
1845 	struct nvme_rdma_cm_event_entry *entry;
1846 
1847 	if (ctrlr->adminq) {
1848 		nvme_rdma_ctrlr_delete_io_qpair(ctrlr, ctrlr->adminq);
1849 	}
1850 
1851 	STAILQ_FOREACH(entry, &rctrlr->pending_cm_events, link) {
1852 		rdma_ack_cm_event(entry->evt);
1853 	}
1854 
1855 	STAILQ_INIT(&rctrlr->free_cm_events);
1856 	STAILQ_INIT(&rctrlr->pending_cm_events);
1857 	nvme_rdma_free(rctrlr->cm_events);
1858 
1859 	if (rctrlr->cm_channel) {
1860 		rdma_destroy_event_channel(rctrlr->cm_channel);
1861 		rctrlr->cm_channel = NULL;
1862 	}
1863 
1864 	nvme_ctrlr_destruct_finish(ctrlr);
1865 
1866 	nvme_rdma_free(rctrlr);
1867 
1868 	return 0;
1869 }
1870 
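/*
 * Submit a request on an RDMA qpair. Returns -EAGAIN when no rdma_req is
 * currently available so that the upper layer queues the request and
 * retries it later.
 */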
1871 static int
1872 nvme_rdma_qpair_submit_request(struct spdk_nvme_qpair *qpair,
1873 			       struct nvme_request *req)
1874 {
1875 	struct nvme_rdma_qpair *rqpair;
1876 	struct spdk_nvme_rdma_req *rdma_req;
1877 	struct ibv_send_wr *wr;
1878 
1879 	rqpair = nvme_rdma_qpair(qpair);
1880 	assert(rqpair != NULL);
1881 	assert(req != NULL);
1882 
1883 	rdma_req = nvme_rdma_req_get(rqpair);
1884 	if (spdk_unlikely(!rdma_req)) {
1885 		if (rqpair->poller) {
1886 			rqpair->poller->stats.queued_requests++;
1887 		}
1888 		/* Inform the upper layer to try again later. */
1889 		return -EAGAIN;
1890 	}
1891 
1892 	if (nvme_rdma_req_init(rqpair, req, rdma_req)) {
1893 		SPDK_ERRLOG("nvme_rdma_req_init() failed\n");
1894 		TAILQ_REMOVE(&rqpair->outstanding_reqs, rdma_req, link);
1895 		nvme_rdma_req_put(rqpair, rdma_req);
1896 		return -1;
1897 	}
1898 
1899 	wr = &rdma_req->send_wr;
1900 	wr->next = NULL;
1901 	nvme_rdma_trace_ibv_sge(wr->sg_list);
1902 	return nvme_rdma_qpair_queue_send_wr(rqpair, wr);
1903 }
1904 
1905 static int
1906 nvme_rdma_qpair_reset(struct spdk_nvme_qpair *qpair)
1907 {
1908 	/* Currently there is nothing to do here. */
1909 	return 0;
1910 }
1911 
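/*
 * Complete every outstanding request with ABORTED SQ DELETION status and the
 * given dnr value. The qpair is disconnected first, unless it is already
 * disconnected/disconnecting or being destroyed.
 */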
1912 static void
1913 nvme_rdma_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
1914 {
1915 	struct spdk_nvme_rdma_req *rdma_req, *tmp;
1916 	struct spdk_nvme_cpl cpl;
1917 	struct nvme_rdma_qpair *rqpair = nvme_rdma_qpair(qpair);
1918 
1919 	cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
1920 	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
1921 	cpl.status.dnr = dnr;
1922 
1923 	/*
1924 	 * We cannot abort requests at the RDMA layer without
1925 	 * unregistering them. If we did, we could still get error-free
1926 	 * completions for them on the shared completion queue, so disconnect the qpair first.
1927 	 */
1928 	if (nvme_qpair_get_state(qpair) > NVME_QPAIR_DISCONNECTING &&
1929 	    nvme_qpair_get_state(qpair) != NVME_QPAIR_DESTROYING) {
1930 		nvme_ctrlr_disconnect_qpair(qpair);
1931 	}
1932 
1933 	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->outstanding_reqs, link, tmp) {
1934 		nvme_rdma_req_complete(rdma_req, &cpl);
1935 		nvme_rdma_req_put(rqpair, rdma_req);
1936 	}
1937 }
1938 
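/*
 * Check outstanding requests against the active process's timeout. Requests
 * are in submission order, so iteration stops at the first request that has
 * not timed out.
 */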
1939 static void
1940 nvme_rdma_qpair_check_timeout(struct spdk_nvme_qpair *qpair)
1941 {
1942 	uint64_t t02;
1943 	struct spdk_nvme_rdma_req *rdma_req, *tmp;
1944 	struct nvme_rdma_qpair *rqpair = nvme_rdma_qpair(qpair);
1945 	struct spdk_nvme_ctrlr *ctrlr = qpair->ctrlr;
1946 	struct spdk_nvme_ctrlr_process *active_proc;
1947 
1948 	/* Don't check timeouts during controller initialization. */
1949 	if (ctrlr->state != NVME_CTRLR_STATE_READY) {
1950 		return;
1951 	}
1952 
1953 	if (nvme_qpair_is_admin_queue(qpair)) {
1954 		active_proc = nvme_ctrlr_get_current_process(ctrlr);
1955 	} else {
1956 		active_proc = qpair->active_proc;
1957 	}
1958 
1959 	/* Only check timeouts if the current process has a timeout callback. */
1960 	if (active_proc == NULL || active_proc->timeout_cb_fn == NULL) {
1961 		return;
1962 	}
1963 
1964 	t02 = spdk_get_ticks();
1965 	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->outstanding_reqs, link, tmp) {
1966 		assert(rdma_req->req != NULL);
1967 
1968 		if (nvme_request_check_timeout(rdma_req->req, rdma_req->id, active_proc, t02)) {
1969 			/*
1970 			 * The requests are in order, so as soon as one has not timed out,
1971 			 * stop iterating.
1972 			 */
1973 			break;
1974 		}
1975 	}
1976 }
1977 
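/*
 * Called once both the send and recv work requests of an rdma_req have
 * completed: complete the NVMe request with the received cpl, return the
 * rdma_req to the free list and re-post the receive buffer that carried the
 * response.
 */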
1978 static inline int
1979 nvme_rdma_request_ready(struct nvme_rdma_qpair *rqpair, struct spdk_nvme_rdma_req *rdma_req)
1980 {
1981 	nvme_rdma_req_complete(rdma_req, &rqpair->rsps[rdma_req->rsp_idx].cpl);
1982 	nvme_rdma_req_put(rqpair, rdma_req);
1983 	return nvme_rdma_post_recv(rqpair, rdma_req->rsp_idx);
1984 }
1985 
1986 #define MAX_COMPLETIONS_PER_POLL 128
1987 
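/*
 * Record a transport failure reason on the qpair and start disconnecting it.
 */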
1988 static void
1989 nvme_rdma_fail_qpair(struct spdk_nvme_qpair *qpair, int failure_reason)
1990 {
1991 	if (failure_reason == IBV_WC_RETRY_EXC_ERR) {
1992 		qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_REMOTE;
1993 	} else if (qpair->transport_failure_reason == SPDK_NVME_QPAIR_FAILURE_NONE) {
1994 		qpair->transport_failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
1995 	}
1996 
1997 	nvme_ctrlr_disconnect_qpair(qpair);
1998 }
1999 
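/*
 * Fail the qpair unless the poll group is already tracking it as destroyed,
 * in which case the disconnect was handled earlier and we are only draining
 * flushed completions.
 */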
2000 static void
2001 nvme_rdma_conditional_fail_qpair(struct nvme_rdma_qpair *rqpair, struct nvme_rdma_poll_group *group)
2002 {
2003 	struct nvme_rdma_destroyed_qpair	*qpair_tracker;
2004 
2005 	assert(rqpair);
2006 	if (group) {
2007 		STAILQ_FOREACH(qpair_tracker, &group->destroyed_qpairs, link) {
2008 			if (qpair_tracker->destroyed_qpair_tracker == rqpair) {
2009 				return;
2010 			}
2011 		}
2012 	}
2013 	nvme_rdma_fail_qpair(&rqpair->qpair, 0);
2014 }
2015 
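/*
 * Reap up to batch_size work completions from the given CQ. A request only
 * counts as reaped once both its send and recv completions have been seen.
 * Returns the number of reaped requests, or a negative errno if polling the
 * CQ failed or a completion carried an error status.
 */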
2016 static int
2017 nvme_rdma_cq_process_completions(struct ibv_cq *cq, uint32_t batch_size,
2018 				 struct nvme_rdma_poll_group *group,
2019 				 struct nvme_rdma_qpair *rdma_qpair,
2020 				 uint64_t *rdma_completions)
2021 {
2022 	struct ibv_wc			wc[MAX_COMPLETIONS_PER_POLL];
2023 	struct nvme_rdma_qpair		*rqpair;
2024 	struct spdk_nvme_rdma_req	*rdma_req;
2025 	struct spdk_nvme_rdma_rsp	*rdma_rsp;
2026 	struct nvme_rdma_wr		*rdma_wr;
2027 	uint32_t			reaped = 0;
2028 	int				completion_rc = 0;
2029 	int				rc, i;
2030 
2031 	rc = ibv_poll_cq(cq, batch_size, wc);
2032 	if (rc < 0) {
2033 		SPDK_ERRLOG("Error polling CQ! (%d): %s\n",
2034 			    errno, spdk_strerror(errno));
2035 		return -ECANCELED;
2036 	} else if (rc == 0) {
2037 		return 0;
2038 	}
2039 
2040 	for (i = 0; i < rc; i++) {
2041 		rdma_wr = (struct nvme_rdma_wr *)wc[i].wr_id;
2042 		switch (rdma_wr->type) {
2043 		case RDMA_WR_TYPE_RECV:
2044 			rdma_rsp = SPDK_CONTAINEROF(rdma_wr, struct spdk_nvme_rdma_rsp, rdma_wr);
2045 			rqpair = rdma_rsp->rqpair;
2046 			assert(rqpair->current_num_recvs > 0);
2047 			rqpair->current_num_recvs--;
2048 
2049 			if (wc[i].status) {
2050 				SPDK_ERRLOG("CQ error on Queue Pair %p, Response Index %lu (%d): %s\n",
2051 					    rqpair, wc[i].wr_id, wc[i].status, ibv_wc_status_str(wc[i].status));
2052 				nvme_rdma_conditional_fail_qpair(rqpair, group);
2053 				completion_rc = -ENXIO;
2054 				continue;
2055 			}
2056 
2057 			SPDK_DEBUGLOG(nvme, "CQ recv completion\n");
2058 
2059 			if (wc[i].byte_len < sizeof(struct spdk_nvme_cpl)) {
2060 				SPDK_ERRLOG("recv length %u less than expected response size\n", wc[i].byte_len);
2061 				nvme_rdma_conditional_fail_qpair(rqpair, group);
2062 				completion_rc = -ENXIO;
2063 				continue;
2064 			}
2065 			rdma_req = &rqpair->rdma_reqs[rdma_rsp->cpl.cid];
2066 			rdma_req->completion_flags |= NVME_RDMA_RECV_COMPLETED;
2067 			rdma_req->rsp_idx = rdma_rsp->idx;
2068 
2069 			if ((rdma_req->completion_flags & NVME_RDMA_SEND_COMPLETED) != 0) {
2070 				if (spdk_unlikely(nvme_rdma_request_ready(rqpair, rdma_req))) {
2071 					SPDK_ERRLOG("Unable to re-post rx descriptor\n");
2072 					nvme_rdma_conditional_fail_qpair(rqpair, group);
2073 					completion_rc = -ENXIO;
2074 					continue;
2075 				}
2076 				reaped++;
2077 				rqpair->num_completions++;
2078 			}
2079 			break;
2080 
2081 		case RDMA_WR_TYPE_SEND:
2082 			rdma_req = SPDK_CONTAINEROF(rdma_wr, struct spdk_nvme_rdma_req, rdma_wr);
2083 
2084 			/* If we are flushing I/O */
2085 			if (wc[i].status) {
2086 				rqpair = rdma_req->req ? nvme_rdma_qpair(rdma_req->req->qpair) : NULL;
2087 				if (!rqpair) {
2088 					rqpair = rdma_qpair != NULL ? rdma_qpair : nvme_rdma_poll_group_get_qpair_by_id(group,
2089 							wc[i].qp_num);
2090 				}
2091 				assert(rqpair);
2092 				assert(rqpair->current_num_sends > 0);
2093 				rqpair->current_num_sends--;
2094 				nvme_rdma_conditional_fail_qpair(rqpair, group);
2095 				SPDK_ERRLOG("CQ error on Queue Pair %p, Response Index %lu (%d): %s\n",
2096 					    rqpair, wc[i].wr_id, wc[i].status, ibv_wc_status_str(wc[i].status));
2097 				completion_rc = -ENXIO;
2098 				continue;
2099 			}
2100 
2101 			rqpair = nvme_rdma_qpair(rdma_req->req->qpair);
2102 			rdma_req->completion_flags |= NVME_RDMA_SEND_COMPLETED;
2103 			rqpair->current_num_sends--;
2104 
2105 			if ((rdma_req->completion_flags & NVME_RDMA_RECV_COMPLETED) != 0) {
2106 				if (spdk_unlikely(nvme_rdma_request_ready(rqpair, rdma_req))) {
2107 					SPDK_ERRLOG("Unable to re-post rx descriptor\n");
2108 					nvme_rdma_conditional_fail_qpair(rqpair, group);
2109 					completion_rc = -ENXIO;
2110 					continue;
2111 				}
2112 				reaped++;
2113 				rqpair->num_completions++;
2114 			}
2115 			break;
2116 
2117 		default:
2118 			SPDK_ERRLOG("Received an unexpected opcode on the CQ: %d\n", rdma_wr->type);
2119 			return -ECANCELED;
2120 		}
2121 	}
2122 
2123 	*rdma_completions += rc;
2124 
2125 	if (completion_rc) {
2126 		return completion_rc;
2127 	}
2128 
2129 	return reaped;
2130 }
2131 
2132 static void
2133 dummy_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
2134 {
2135 
2136 }
2137 
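/*
 * Process completions for a single qpair. If the qpair belongs to a poll
 * group, delegate to the poll group path since the CQ is shared with other
 * qpairs.
 */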
2138 static int
2139 nvme_rdma_qpair_process_completions(struct spdk_nvme_qpair *qpair,
2140 				    uint32_t max_completions)
2141 {
2142 	struct nvme_rdma_qpair		*rqpair = nvme_rdma_qpair(qpair);
2143 	int				rc = 0, batch_size;
2144 	struct ibv_cq			*cq;
2145 	struct nvme_rdma_ctrlr		*rctrlr;
2146 	uint64_t			rdma_completions = 0;
2147 
2148 	/*
2149 	 * This path is used during the connection phase. It's possible that we are still reaping error
2150 	 * completions from other qpairs, so we need to call the poll group function. It is also more
2151 	 * correct to do so, since the CQ is shared.
2152 	 */
2153 	if (qpair->poll_group != NULL) {
2154 		return spdk_nvme_poll_group_process_completions(qpair->poll_group->group, max_completions,
2155 				dummy_disconnected_qpair_cb);
2156 	}
2157 
2158 	if (max_completions == 0) {
2159 		max_completions = rqpair->num_entries;
2160 	} else {
2161 		max_completions = spdk_min(max_completions, rqpair->num_entries);
2162 	}
2163 
2164 	if (nvme_qpair_is_admin_queue(&rqpair->qpair)) {
2165 		rctrlr = nvme_rdma_ctrlr(rqpair->qpair.ctrlr);
2166 		nvme_rdma_poll_events(rctrlr);
2167 	}
2168 	nvme_rdma_qpair_process_cm_event(rqpair);
2169 
2170 	if (spdk_unlikely(qpair->transport_failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE)) {
2171 		nvme_rdma_fail_qpair(qpair, 0);
2172 		return -ENXIO;
2173 	}
2174 
2175 	cq = rqpair->cq;
2176 
2177 	rqpair->num_completions = 0;
2178 	do {
2179 		batch_size = spdk_min((max_completions - rqpair->num_completions), MAX_COMPLETIONS_PER_POLL);
2180 		rc = nvme_rdma_cq_process_completions(cq, batch_size, NULL, rqpair, &rdma_completions);
2181 
2182 		if (rc == 0) {
2183 			break;
2184 		} else if (rc == -ECANCELED) {
2185 			/* Handle the case where we fail to poll the cq. */
2186 			nvme_rdma_fail_qpair(qpair, 0);
2187 			return -ENXIO;
2188 		} else if (rc == -ENXIO) {
2189 			return rc;
2190 		}
2191 	} while (rqpair->num_completions < max_completions);
2192 
2193 	if (spdk_unlikely(nvme_rdma_qpair_submit_sends(rqpair) ||
2194 			  nvme_rdma_qpair_submit_recvs(rqpair))) {
2195 		nvme_rdma_fail_qpair(qpair, 0);
2196 		return -ENXIO;
2197 	}
2198 
2199 	if (spdk_unlikely(rqpair->qpair.ctrlr->timeout_enabled)) {
2200 		nvme_rdma_qpair_check_timeout(qpair);
2201 	}
2202 
2203 	return rqpair->num_completions;
2204 }
2205 
2206 static uint32_t
2207 nvme_rdma_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
2208 {
2209 	/* max_mr_size reported by ibv_query_device indicates the largest value that we can
2210 	 * set for a registered memory region.  It is independent of the actual
2211 	 * I/O size and is very likely to be larger than 2 MiB, which is the
2212 	 * granularity at which we currently register memory regions.  Hence return
2213 	 * UINT32_MAX here and let the generic layer use the controller data to
2214 	 * moderate this value.
2215 	 */
2216 	return UINT32_MAX;
2217 }
2218 
2219 static uint16_t
2220 nvme_rdma_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
2221 {
2222 	struct nvme_rdma_ctrlr *rctrlr = nvme_rdma_ctrlr(ctrlr);
2223 
2224 	return rctrlr->max_sge;
2225 }
2226 
2227 static int
2228 nvme_rdma_qpair_iterate_requests(struct spdk_nvme_qpair *qpair,
2229 				 int (*iter_fn)(struct nvme_request *req, void *arg),
2230 				 void *arg)
2231 {
2232 	struct nvme_rdma_qpair *rqpair = nvme_rdma_qpair(qpair);
2233 	struct spdk_nvme_rdma_req *rdma_req, *tmp;
2234 	int rc;
2235 
2236 	assert(iter_fn != NULL);
2237 
2238 	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->outstanding_reqs, link, tmp) {
2239 		assert(rdma_req->req != NULL);
2240 
2241 		rc = iter_fn(rdma_req->req, arg);
2242 		if (rc != 0) {
2243 			return rc;
2244 		}
2245 	}
2246 
2247 	return 0;
2248 }
2249 
2250 static void
2251 nvme_rdma_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
2252 {
2253 	struct spdk_nvme_rdma_req *rdma_req, *tmp;
2254 	struct spdk_nvme_cpl cpl;
2255 	struct nvme_rdma_qpair *rqpair = nvme_rdma_qpair(qpair);
2256 
2257 	cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
2258 	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
2259 
2260 	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->outstanding_reqs, link, tmp) {
2261 		assert(rdma_req->req != NULL);
2262 
2263 		if (rdma_req->req->cmd.opc != SPDK_NVME_OPC_ASYNC_EVENT_REQUEST) {
2264 			continue;
2265 		}
2266 
2267 		nvme_rdma_req_complete(rdma_req, &cpl);
2268 		nvme_rdma_req_put(rqpair, rdma_req);
2269 	}
2270 }
2271 
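/*
 * Create a poller for one RDMA device and attach a shared completion queue
 * of DEFAULT_NVME_RDMA_CQ_SIZE entries to it.
 */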
2272 static int
2273 nvme_rdma_poller_create(struct nvme_rdma_poll_group *group, struct ibv_context *ctx)
2274 {
2275 	struct nvme_rdma_poller *poller;
2276 
2277 	poller = calloc(1, sizeof(*poller));
2278 	if (poller == NULL) {
2279 		SPDK_ERRLOG("Unable to allocate poller.\n");
2280 		return -ENOMEM;
2281 	}
2282 
2283 	poller->device = ctx;
2284 	poller->cq = ibv_create_cq(poller->device, DEFAULT_NVME_RDMA_CQ_SIZE, group, NULL, 0);
2285 
2286 	if (poller->cq == NULL) {
2287 		free(poller);
2288 		return -EINVAL;
2289 	}
2290 
2291 	STAILQ_INSERT_HEAD(&group->pollers, poller, link);
2292 	group->num_pollers++;
2293 	poller->current_num_wc = DEFAULT_NVME_RDMA_CQ_SIZE;
2294 	poller->required_num_wc = 0;
2295 	return 0;
2296 }
2297 
2298 static void
2299 nvme_rdma_poll_group_free_pollers(struct nvme_rdma_poll_group *group)
2300 {
2301 	struct nvme_rdma_poller	*poller, *tmp_poller;
2302 
2303 	STAILQ_FOREACH_SAFE(poller, &group->pollers, link, tmp_poller) {
2304 		if (poller->cq) {
2305 			ibv_destroy_cq(poller->cq);
2306 		}
2307 		STAILQ_REMOVE(&group->pollers, poller, nvme_rdma_poller, link);
2308 		free(poller);
2309 	}
2310 }
2311 
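/*
 * Create a poll group with one poller (and thus one shared CQ) per RDMA
 * device present on the system.
 */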
2312 static struct spdk_nvme_transport_poll_group *
2313 nvme_rdma_poll_group_create(void)
2314 {
2315 	struct nvme_rdma_poll_group	*group;
2316 	struct ibv_context		**contexts;
2317 	int i = 0;
2318 
2319 	group = calloc(1, sizeof(*group));
2320 	if (group == NULL) {
2321 		SPDK_ERRLOG("Unable to allocate poll group.\n");
2322 		return NULL;
2323 	}
2324 
2325 	STAILQ_INIT(&group->pollers);
2326 
2327 	contexts = rdma_get_devices(NULL);
2328 	if (contexts == NULL) {
2329 		SPDK_ERRLOG("rdma_get_devices() failed: %s (%d)\n", spdk_strerror(errno), errno);
2330 		free(group);
2331 		return NULL;
2332 	}
2333 
2334 	while (contexts[i] != NULL) {
2335 		if (nvme_rdma_poller_create(group, contexts[i])) {
2336 			nvme_rdma_poll_group_free_pollers(group);
2337 			free(group);
2338 			rdma_free_devices(contexts);
2339 			return NULL;
2340 		}
2341 		i++;
2342 	}
2343 
2344 	rdma_free_devices(contexts);
2345 	STAILQ_INIT(&group->destroyed_qpairs);
2346 	return &group->group;
2347 }
2348 
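/*
 * Map an ibv qp_num back to its nvme_rdma_qpair by searching the group's
 * disconnected, connected and destroyed qpair lists. Used on the send
 * completion error path when the work completion no longer points to a
 * valid request.
 */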
2349 struct nvme_rdma_qpair *
2350 nvme_rdma_poll_group_get_qpair_by_id(struct nvme_rdma_poll_group *group, uint32_t qp_num)
2351 {
2352 	struct spdk_nvme_qpair *qpair;
2353 	struct nvme_rdma_destroyed_qpair *rqpair_tracker;
2354 	struct nvme_rdma_qpair *rqpair;
2355 
2356 	STAILQ_FOREACH(qpair, &group->group.disconnected_qpairs, poll_group_stailq) {
2357 		rqpair = nvme_rdma_qpair(qpair);
2358 		if (rqpair->rdma_qp->qp->qp_num == qp_num) {
2359 			return rqpair;
2360 		}
2361 	}
2362 
2363 	STAILQ_FOREACH(qpair, &group->group.connected_qpairs, poll_group_stailq) {
2364 		rqpair = nvme_rdma_qpair(qpair);
2365 		if (rqpair->rdma_qp->qp->qp_num == qp_num) {
2366 			return rqpair;
2367 		}
2368 	}
2369 
2370 	STAILQ_FOREACH(rqpair_tracker, &group->destroyed_qpairs, link) {
2371 		rqpair = rqpair_tracker->destroyed_qpair_tracker;
2372 		if (rqpair->rdma_qp->qp->qp_num == qp_num) {
2373 			return rqpair;
2374 		}
2375 	}
2376 
2377 	return NULL;
2378 }
2379 
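/*
 * Grow the poller's shared CQ, if needed, so that it can hold the work
 * completions of the qpair being added. The size is at least doubled on
 * each resize to amortize the cost of ibv_resize_cq().
 */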
2380 static int
2381 nvme_rdma_resize_cq(struct nvme_rdma_qpair *rqpair, struct nvme_rdma_poller *poller)
2382 {
2383 	int	current_num_wc, required_num_wc;
2384 
2385 	required_num_wc = poller->required_num_wc + WC_PER_QPAIR(rqpair->num_entries);
2386 	current_num_wc = poller->current_num_wc;
2387 	if (current_num_wc < required_num_wc) {
2388 		current_num_wc = spdk_max(current_num_wc * 2, required_num_wc);
2389 	}
2390 
2391 	if (poller->current_num_wc != current_num_wc) {
2392 		SPDK_DEBUGLOG(nvme, "Resize RDMA CQ from %d to %d\n", poller->current_num_wc,
2393 			      current_num_wc);
2394 		if (ibv_resize_cq(poller->cq, current_num_wc)) {
2395 			SPDK_ERRLOG("RDMA CQ resize failed: errno %d: %s\n", errno, spdk_strerror(errno));
2396 			return -1;
2397 		}
2398 
2399 		poller->current_num_wc = current_num_wc;
2400 	}
2401 
2402 	poller->required_num_wc = required_num_wc;
2403 	return 0;
2404 }
2405 
2406 static int
2407 nvme_rdma_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
2408 {
2409 	struct nvme_rdma_qpair		*rqpair = nvme_rdma_qpair(qpair);
2410 	struct nvme_rdma_poll_group	*group = nvme_rdma_poll_group(qpair->poll_group);
2411 	struct nvme_rdma_poller		*poller;
2412 
2413 	assert(rqpair->cq == NULL);
2414 
2415 	STAILQ_FOREACH(poller, &group->pollers, link) {
2416 		if (poller->device == rqpair->cm_id->verbs) {
2417 			if (nvme_rdma_resize_cq(rqpair, poller)) {
2418 				return -EPROTO;
2419 			}
2420 			rqpair->cq = poller->cq;
2421 			rqpair->poller = poller;
2422 			break;
2423 		}
2424 	}
2425 
2426 	if (rqpair->cq == NULL) {
2427 		SPDK_ERRLOG("Unable to find a cq for qpair %p on poll group %p\n", qpair, qpair->poll_group);
2428 		return -EINVAL;
2429 	}
2430 
2431 	return 0;
2432 }
2433 
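/*
 * Disconnect a qpair that belongs to a poll group. The qpair is added to the
 * group's destroyed_qpairs list and its deletion is deferred to the poll
 * group so that flushed completions can still be drained.
 */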
2434 static int
2435 nvme_rdma_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
2436 {
2437 	struct nvme_rdma_qpair			*rqpair = nvme_rdma_qpair(qpair);
2438 	struct nvme_rdma_poll_group		*group;
2439 	struct nvme_rdma_destroyed_qpair	*destroyed_qpair;
2440 	enum nvme_qpair_state			state;
2441 
2442 	if (rqpair->poll_group_disconnect_in_progress) {
2443 		return -EINPROGRESS;
2444 	}
2445 
2446 	rqpair->poll_group_disconnect_in_progress = true;
2447 	state = nvme_qpair_get_state(qpair);
2448 	group = nvme_rdma_poll_group(qpair->poll_group);
2449 	rqpair->cq = NULL;
2450 
2451 	/*
2452 	 * We want to guard against an endless recursive loop while making
2453 	 * sure the qpair is disconnected before we remove it from the poll group.
2454 	 */
2455 	if (state > NVME_QPAIR_DISCONNECTING && state != NVME_QPAIR_DESTROYING) {
2456 		nvme_ctrlr_disconnect_qpair(qpair);
2457 	}
2458 
2459 	/*
2460 	 * If this allocation fails, the system is in serious trouble;
2461 	 * just let the qpair get cleaned up immediately.
2462 	 */
2463 	destroyed_qpair = calloc(1, sizeof(*destroyed_qpair));
2464 	if (destroyed_qpair == NULL) {
2465 		return 0;
2466 	}
2467 
2468 	destroyed_qpair->destroyed_qpair_tracker = rqpair;
2469 	destroyed_qpair->completed_cycles = 0;
2470 	STAILQ_INSERT_TAIL(&group->destroyed_qpairs, destroyed_qpair, link);
2471 
2472 	rqpair->defer_deletion_to_pg = true;
2473 
2474 	rqpair->poll_group_disconnect_in_progress = false;
2475 	return 0;
2476 }
2477 
2478 static int
2479 nvme_rdma_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
2480 			 struct spdk_nvme_qpair *qpair)
2481 {
2482 	return 0;
2483 }
2484 
2485 static int
2486 nvme_rdma_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
2487 			    struct spdk_nvme_qpair *qpair)
2488 {
2489 	if (qpair->poll_group_tailq_head == &tgroup->connected_qpairs) {
2490 		return nvme_poll_group_disconnect_qpair(qpair);
2491 	}
2492 
2493 	return 0;
2494 }
2495 
2496 static void
2497 nvme_rdma_poll_group_delete_qpair(struct nvme_rdma_poll_group *group,
2498 				  struct nvme_rdma_destroyed_qpair *qpair_tracker)
2499 {
2500 	struct nvme_rdma_qpair *rqpair = qpair_tracker->destroyed_qpair_tracker;
2501 
2502 	rqpair->defer_deletion_to_pg = false;
2503 	if (nvme_qpair_get_state(&rqpair->qpair) == NVME_QPAIR_DESTROYING) {
2504 		nvme_rdma_ctrlr_delete_io_qpair(rqpair->qpair.ctrlr, &rqpair->qpair);
2505 	}
2506 	STAILQ_REMOVE(&group->destroyed_qpairs, qpair_tracker, nvme_rdma_destroyed_qpair, link);
2507 	free(qpair_tracker);
2508 }
2509 
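/*
 * Poll group completion path: handle CM events for each connected qpair,
 * spread the completion budget evenly across the pollers, submit any
 * deferred sends/recvs, and finally reap destroyed qpairs whose flushed
 * completions have drained or that have exceeded the expiration cycle count.
 */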
2510 static int64_t
2511 nvme_rdma_poll_group_process_completions(struct spdk_nvme_transport_poll_group *tgroup,
2512 		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
2513 {
2514 	struct spdk_nvme_qpair			*qpair, *tmp_qpair;
2515 	struct nvme_rdma_destroyed_qpair	*qpair_tracker, *tmp_qpair_tracker;
2516 	struct nvme_rdma_qpair			*rqpair;
2517 	struct nvme_rdma_poll_group		*group;
2518 	struct nvme_rdma_poller			*poller;
2519 	int					num_qpairs = 0, batch_size, rc;
2520 	int64_t					total_completions = 0;
2521 	uint64_t				completions_allowed = 0;
2522 	uint64_t				completions_per_poller = 0;
2523 	uint64_t				poller_completions = 0;
2524 	uint64_t				rdma_completions;
2525 
2526 
2527 	if (completions_per_qpair == 0) {
2528 		completions_per_qpair = MAX_COMPLETIONS_PER_POLL;
2529 	}
2530 
2531 	group = nvme_rdma_poll_group(tgroup);
2532 	STAILQ_FOREACH_SAFE(qpair, &tgroup->disconnected_qpairs, poll_group_stailq, tmp_qpair) {
2533 		disconnected_qpair_cb(qpair, tgroup->group->ctx);
2534 	}
2535 
2536 	STAILQ_FOREACH_SAFE(qpair, &tgroup->connected_qpairs, poll_group_stailq, tmp_qpair) {
2537 		rqpair = nvme_rdma_qpair(qpair);
2538 		rqpair->num_completions = 0;
2539 		nvme_rdma_qpair_process_cm_event(rqpair);
2540 
2541 		if (spdk_unlikely(qpair->transport_failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE)) {
2542 			nvme_rdma_fail_qpair(qpair, 0);
2543 			disconnected_qpair_cb(qpair, tgroup->group->ctx);
2544 			continue;
2545 		}
2546 		num_qpairs++;
2547 	}
2548 
2549 	completions_allowed = completions_per_qpair * num_qpairs;
2550 	completions_per_poller = spdk_max(completions_allowed / group->num_pollers, 1);
2551 
2552 	STAILQ_FOREACH(poller, &group->pollers, link) {
2553 		poller_completions = 0;
2554 		rdma_completions = 0;
2555 		do {
2556 			poller->stats.polls++;
2557 			batch_size = spdk_min((completions_per_poller - poller_completions), MAX_COMPLETIONS_PER_POLL);
2558 			rc = nvme_rdma_cq_process_completions(poller->cq, batch_size, group, NULL, &rdma_completions);
2559 			if (rc <= 0) {
2560 				if (rc == -ECANCELED) {
2561 					return -EIO;
2562 				} else if (rc == 0) {
2563 					poller->stats.idle_polls++;
2564 				}
2565 				break;
2566 			}
2567 
2568 			poller_completions += rc;
2569 		} while (poller_completions < completions_per_poller);
2570 		total_completions += poller_completions;
2571 		poller->stats.completions += rdma_completions;
2572 	}
2573 
2574 	STAILQ_FOREACH_SAFE(qpair, &tgroup->connected_qpairs, poll_group_stailq, tmp_qpair) {
2575 		rqpair = nvme_rdma_qpair(qpair);
2576 		if (spdk_unlikely(qpair->ctrlr->timeout_enabled)) {
2577 			nvme_rdma_qpair_check_timeout(qpair);
2578 		}
2579 
2580 		nvme_rdma_qpair_submit_sends(rqpair);
2581 		nvme_rdma_qpair_submit_recvs(rqpair);
2582 		nvme_qpair_resubmit_requests(&rqpair->qpair, rqpair->num_completions);
2583 	}
2584 
2585 	/*
2586 	 * Once a qpair is disconnected, we can still get flushed completions for it.
2587 	 * On most hardware those requests complete immediately, but in certain cases
2588 	 * flushed requests can linger. By default the qpair is destroyed once all of its completions
2589 	 * have drained, with an expiration-cycle fallback for cases where not all completions come back.
2590 	 */
2591 	STAILQ_FOREACH_SAFE(qpair_tracker, &group->destroyed_qpairs, link, tmp_qpair_tracker) {
2592 		qpair_tracker->completed_cycles++;
2593 		rqpair = qpair_tracker->destroyed_qpair_tracker;
2594 		if ((rqpair->current_num_sends == 0 && rqpair->current_num_recvs == 0) ||
2595 		    qpair_tracker->completed_cycles > NVME_RDMA_DESTROYED_QPAIR_EXPIRATION_CYCLES) {
2596 			nvme_rdma_poll_group_delete_qpair(group, qpair_tracker);
2597 		}
2598 	}
2599 
2600 	return total_completions;
2601 }
2602 
2603 static int
2604 nvme_rdma_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
2605 {
2606 	struct nvme_rdma_poll_group		*group = nvme_rdma_poll_group(tgroup);
2607 	struct nvme_rdma_destroyed_qpair	*qpair_tracker, *tmp_qpair_tracker;
2608 	struct nvme_rdma_qpair			*rqpair;
2609 
2610 	if (!STAILQ_EMPTY(&tgroup->connected_qpairs) || !STAILQ_EMPTY(&tgroup->disconnected_qpairs)) {
2611 		return -EBUSY;
2612 	}
2613 
2614 	STAILQ_FOREACH_SAFE(qpair_tracker, &group->destroyed_qpairs, link, tmp_qpair_tracker) {
2615 		rqpair = qpair_tracker->destroyed_qpair_tracker;
2616 		if (nvme_qpair_get_state(&rqpair->qpair) == NVME_QPAIR_DESTROYING) {
2617 			rqpair->defer_deletion_to_pg = false;
2618 			nvme_rdma_ctrlr_delete_io_qpair(rqpair->qpair.ctrlr, &rqpair->qpair);
2619 		}
2620 
2621 		STAILQ_REMOVE(&group->destroyed_qpairs, qpair_tracker, nvme_rdma_destroyed_qpair, link);
2622 		free(qpair_tracker);
2623 	}
2624 
2625 	nvme_rdma_poll_group_free_pollers(group);
2626 	free(group);
2627 
2628 	return 0;
2629 }
2630 
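/*
 * Report per-device statistics (polls, completions, posted work requests and
 * doorbell updates) for every poller in the group. The caller releases the
 * result with nvme_rdma_poll_group_free_stats().
 */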
2631 static int
2632 nvme_rdma_poll_group_get_stats(struct spdk_nvme_transport_poll_group *tgroup,
2633 			       struct spdk_nvme_transport_poll_group_stat **_stats)
2634 {
2635 	struct nvme_rdma_poll_group *group;
2636 	struct spdk_nvme_transport_poll_group_stat *stats;
2637 	struct spdk_nvme_rdma_device_stat *device_stat;
2638 	struct nvme_rdma_poller *poller;
2639 	uint32_t i = 0;
2640 
2641 	if (tgroup == NULL || _stats == NULL) {
2642 		SPDK_ERRLOG("Invalid stats or group pointer\n");
2643 		return -EINVAL;
2644 	}
2645 
2646 	group = nvme_rdma_poll_group(tgroup);
2647 	stats = calloc(1, sizeof(*stats));
2648 	if (!stats) {
2649 		SPDK_ERRLOG("Can't allocate memory for RDMA stats\n");
2650 		return -ENOMEM;
2651 	}
2652 	stats->trtype = SPDK_NVME_TRANSPORT_RDMA;
2653 	stats->rdma.num_devices = group->num_pollers;
2654 	stats->rdma.device_stats = calloc(stats->rdma.num_devices, sizeof(*stats->rdma.device_stats));
2655 	if (!stats->rdma.device_stats) {
2656 		SPDK_ERRLOG("Can't allocate memory for RDMA device stats\n");
2657 		free(stats);
2658 		return -ENOMEM;
2659 	}
2660 
2661 	STAILQ_FOREACH(poller, &group->pollers, link) {
2662 		device_stat = &stats->rdma.device_stats[i];
2663 		device_stat->name = poller->device->device->name;
2664 		device_stat->polls = poller->stats.polls;
2665 		device_stat->idle_polls = poller->stats.idle_polls;
2666 		device_stat->completions = poller->stats.completions;
2667 		device_stat->queued_requests = poller->stats.queued_requests;
2668 		device_stat->total_send_wrs = poller->stats.rdma_stats.send.num_submitted_wrs;
2669 		device_stat->send_doorbell_updates = poller->stats.rdma_stats.send.doorbell_updates;
2670 		device_stat->total_recv_wrs = poller->stats.rdma_stats.recv.num_submitted_wrs;
2671 		device_stat->recv_doorbell_updates = poller->stats.rdma_stats.recv.doorbell_updates;
2672 		i++;
2673 	}
2674 
2675 	*_stats = stats;
2676 
2677 	return 0;
2678 }
2679 
2680 static void
2681 nvme_rdma_poll_group_free_stats(struct spdk_nvme_transport_poll_group *tgroup,
2682 				struct spdk_nvme_transport_poll_group_stat *stats)
2683 {
2684 	if (stats) {
2685 		free(stats->rdma.device_stats);
2686 	}
2687 	free(stats);
2688 }
2689 
2690 void
2691 spdk_nvme_rdma_init_hooks(struct spdk_nvme_rdma_hooks *hooks)
2692 {
2693 	g_nvme_hooks = *hooks;
2694 }
2695 
2696 const struct spdk_nvme_transport_ops rdma_ops = {
2697 	.name = "RDMA",
2698 	.type = SPDK_NVME_TRANSPORT_RDMA,
2699 	.ctrlr_construct = nvme_rdma_ctrlr_construct,
2700 	.ctrlr_scan = nvme_fabric_ctrlr_scan,
2701 	.ctrlr_destruct = nvme_rdma_ctrlr_destruct,
2702 	.ctrlr_enable = nvme_rdma_ctrlr_enable,
2703 
2704 	.ctrlr_set_reg_4 = nvme_fabric_ctrlr_set_reg_4,
2705 	.ctrlr_set_reg_8 = nvme_fabric_ctrlr_set_reg_8,
2706 	.ctrlr_get_reg_4 = nvme_fabric_ctrlr_get_reg_4,
2707 	.ctrlr_get_reg_8 = nvme_fabric_ctrlr_get_reg_8,
2708 
2709 	.ctrlr_get_max_xfer_size = nvme_rdma_ctrlr_get_max_xfer_size,
2710 	.ctrlr_get_max_sges = nvme_rdma_ctrlr_get_max_sges,
2711 
2712 	.ctrlr_create_io_qpair = nvme_rdma_ctrlr_create_io_qpair,
2713 	.ctrlr_delete_io_qpair = nvme_rdma_ctrlr_delete_io_qpair,
2714 	.ctrlr_connect_qpair = nvme_rdma_ctrlr_connect_qpair,
2715 	.ctrlr_disconnect_qpair = nvme_rdma_ctrlr_disconnect_qpair,
2716 
2717 	.qpair_abort_reqs = nvme_rdma_qpair_abort_reqs,
2718 	.qpair_reset = nvme_rdma_qpair_reset,
2719 	.qpair_submit_request = nvme_rdma_qpair_submit_request,
2720 	.qpair_process_completions = nvme_rdma_qpair_process_completions,
2721 	.qpair_iterate_requests = nvme_rdma_qpair_iterate_requests,
2722 	.admin_qpair_abort_aers = nvme_rdma_admin_qpair_abort_aers,
2723 
2724 	.poll_group_create = nvme_rdma_poll_group_create,
2725 	.poll_group_connect_qpair = nvme_rdma_poll_group_connect_qpair,
2726 	.poll_group_disconnect_qpair = nvme_rdma_poll_group_disconnect_qpair,
2727 	.poll_group_add = nvme_rdma_poll_group_add,
2728 	.poll_group_remove = nvme_rdma_poll_group_remove,
2729 	.poll_group_process_completions = nvme_rdma_poll_group_process_completions,
2730 	.poll_group_destroy = nvme_rdma_poll_group_destroy,
2731 	.poll_group_get_stats = nvme_rdma_poll_group_get_stats,
2732 	.poll_group_free_stats = nvme_rdma_poll_group_free_stats,
2733 };
2734 
2735 SPDK_NVME_TRANSPORT_REGISTER(rdma, &rdma_ops);
2736