1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include <infiniband/verbs.h>
37 #include <rdma/rdma_cma.h>
38 #include <rdma/rdma_verbs.h>
39 
40 #include "nvmf_internal.h"
41 #include "transport.h"
42 
43 #include "spdk/assert.h"
44 #include "spdk/thread.h"
45 #include "spdk/nvmf.h"
46 #include "spdk/nvmf_spec.h"
47 #include "spdk/string.h"
48 #include "spdk/trace.h"
49 #include "spdk/util.h"
50 
51 #include "spdk_internal/log.h"
52 
53 /*
54  * RDMA Connection Resource Defaults
55  */
56 #define NVMF_DEFAULT_TX_SGE		1
57 #define NVMF_DEFAULT_RX_SGE		2
58 #define NVMF_DEFAULT_DATA_SGE		16
59 
60 /* The RDMA completion queue size */
61 #define NVMF_RDMA_CQ_SIZE	4096
62 
63 /* The AIO backend requires block-size-aligned data buffers;
64  * an extra 4KiB-aligned data buffer should work for most devices.
65  */
66 #define SHIFT_4KB			12
67 #define NVMF_DATA_BUFFER_ALIGNMENT	(1 << SHIFT_4KB)
68 #define NVMF_DATA_BUFFER_MASK		(NVMF_DATA_BUFFER_ALIGNMENT - 1)
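/*
 * Illustrative note: NVMF_DATA_BUFFER_MASK is used to round a data buffer up
 * to the next 4KiB boundary, e.g.
 *
 *     aligned = (void *)(((uintptr_t)buf + NVMF_DATA_BUFFER_MASK) &
 *                        ~NVMF_DATA_BUFFER_MASK);
 *
 * which is essentially the expression used in
 * spdk_nvmf_rdma_request_fill_iovs() below.
 */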
69 
70 enum spdk_nvmf_rdma_request_state {
71 	/* The request is not currently in use */
72 	RDMA_REQUEST_STATE_FREE = 0,
73 
74 	/* Initial state when a request is first received */
75 	RDMA_REQUEST_STATE_NEW,
76 
77 	/* The request is queued until a data buffer is available. */
78 	RDMA_REQUEST_STATE_NEED_BUFFER,
79 
80 	/* The request is waiting on RDMA queue depth availability
81 	 * to transfer data between the host and the controller.
82 	 */
83 	RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING,
84 
85 	/* The request is currently transferring data from the host to the controller. */
86 	RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER,
87 
88 	/* The request is ready to execute at the block device */
89 	RDMA_REQUEST_STATE_READY_TO_EXECUTE,
90 
91 	/* The request is currently executing at the block device */
92 	RDMA_REQUEST_STATE_EXECUTING,
93 
94 	/* The request finished executing at the block device */
95 	RDMA_REQUEST_STATE_EXECUTED,
96 
97 	/* The request is ready to send a completion */
98 	RDMA_REQUEST_STATE_READY_TO_COMPLETE,
99 
100 	/* The request is currently transferring data from the controller to the host. */
101 	RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST,
102 
103 	/* The request currently has an outstanding completion without an
104 	 * associated data transfer.
105 	 */
106 	RDMA_REQUEST_STATE_COMPLETING,
107 
108 	/* The request completed and can be marked free. */
109 	RDMA_REQUEST_STATE_COMPLETED,
110 
111 	/* Terminator */
112 	RDMA_REQUEST_NUM_STATES,
113 };
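/*
 * Rough request lifecycle, as driven by spdk_nvmf_rdma_request_process()
 * below (bracketed states are skipped when they do not apply):
 *
 *   FREE -> NEW -> [NEED_BUFFER] -> [DATA_TRANSFER_PENDING ->
 *   TRANSFERRING_HOST_TO_CONTROLLER] -> READY_TO_EXECUTE -> EXECUTING ->
 *   EXECUTED -> [DATA_TRANSFER_PENDING] -> READY_TO_COMPLETE ->
 *   TRANSFERRING_CONTROLLER_TO_HOST or COMPLETING -> COMPLETED -> FREE
 */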
114 
115 #define OBJECT_NVMF_RDMA_IO				0x40
116 
117 #define									TRACE_GROUP_NVMF_RDMA 0x4
118 #define TRACE_RDMA_REQUEST_STATE_NEW					SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x0)
119 #define TRACE_RDMA_REQUEST_STATE_NEED_BUFFER				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x1)
120 #define TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING			SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x2)
121 #define TRACE_RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER	SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x3)
122 #define TRACE_RDMA_REQUEST_STATE_READY_TO_EXECUTE			SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x4)
123 #define TRACE_RDMA_REQUEST_STATE_EXECUTING				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x5)
124 #define TRACE_RDMA_REQUEST_STATE_EXECUTED				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x6)
125 #define TRACE_RDMA_REQUEST_STATE_READY_TO_COMPLETE			SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x7)
126 #define TRACE_RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST	SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x8)
127 #define TRACE_RDMA_REQUEST_STATE_COMPLETING				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x9)
128 #define TRACE_RDMA_REQUEST_STATE_COMPLETED				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xA)
129 
130 SPDK_TRACE_REGISTER_FN(nvmf_trace)
131 {
132 	spdk_trace_register_object(OBJECT_NVMF_RDMA_IO, 'r');
133 	spdk_trace_register_description("RDMA_REQ_NEW", "",
134 					TRACE_RDMA_REQUEST_STATE_NEW,
135 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 1, 0, 0, "");
136 	spdk_trace_register_description("RDMA_REQ_NEED_BUFFER", "",
137 					TRACE_RDMA_REQUEST_STATE_NEED_BUFFER,
138 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 0, 0, "");
139 	spdk_trace_register_description("RDMA_REQ_TX_PENDING_H_TO_C", "",
140 					TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING,
141 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 0, 0, "");
142 	spdk_trace_register_description("RDMA_REQ_TX_H_TO_C", "",
143 					TRACE_RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER,
144 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 0, 0, "");
145 	spdk_trace_register_description("RDMA_REQ_RDY_TO_EXECUTE", "",
146 					TRACE_RDMA_REQUEST_STATE_READY_TO_EXECUTE,
147 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 0, 0, "");
148 	spdk_trace_register_description("RDMA_REQ_EXECUTING", "",
149 					TRACE_RDMA_REQUEST_STATE_EXECUTING,
150 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 0, 0, "");
151 	spdk_trace_register_description("RDMA_REQ_EXECUTED", "",
152 					TRACE_RDMA_REQUEST_STATE_EXECUTED,
153 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 0, 0, "");
154 	spdk_trace_register_description("RDMA_REQ_RDY_TO_COMPLETE", "",
155 					TRACE_RDMA_REQUEST_STATE_READY_TO_COMPLETE,
156 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 0, 0, "");
157 	spdk_trace_register_description("RDMA_REQ_COMPLETING_CONTROLLER_TO_HOST", "",
158 					TRACE_RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST,
159 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 0, 0, "");
160 	spdk_trace_register_description("RDMA_REQ_COMPLETING_INCAPSULE", "",
161 					TRACE_RDMA_REQUEST_STATE_COMPLETING,
162 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 0, 0, "");
163 	spdk_trace_register_description("RDMA_REQ_COMPLETED", "",
164 					TRACE_RDMA_REQUEST_STATE_COMPLETED,
165 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 0, 0, "");
166 }
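/*
 * Note: the request state machine below calls spdk_trace_record() with one of
 * the tracepoint IDs registered above and the request pointer as the trace
 * object, so per-request state transitions can be examined offline with
 * SPDK's trace tooling (assuming tracing was enabled when the target was
 * started).
 */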
167 
168 /* This structure holds commands as they are received off the wire.
169  * It must be dynamically paired with a full request object
170  * (spdk_nvmf_rdma_request) to service a request. It is separate
171  * from the request because RDMA completions are not guaranteed to
172  * arrive in order, so occasionally a new incoming command arrives
173  * when there aren't any free request objects.
174  */
175 struct spdk_nvmf_rdma_recv {
176 	struct ibv_recv_wr		wr;
177 	struct ibv_sge			sgl[NVMF_DEFAULT_RX_SGE];
178 
179 	struct spdk_nvmf_rdma_qpair	*qpair;
180 
181 	/* In-capsule data buffer */
182 	uint8_t				*buf;
183 
184 	TAILQ_ENTRY(spdk_nvmf_rdma_recv) link;
185 };
186 
187 struct spdk_nvmf_rdma_request {
188 	struct spdk_nvmf_request		req;
189 	bool					data_from_pool;
190 
191 	enum spdk_nvmf_rdma_request_state	state;
192 
193 	struct spdk_nvmf_rdma_recv		*recv;
194 
195 	struct {
196 		struct	ibv_send_wr		wr;
197 		struct	ibv_sge			sgl[NVMF_DEFAULT_TX_SGE];
198 	} rsp;
199 
200 	struct {
201 		struct ibv_send_wr		wr;
202 		struct ibv_sge			sgl[SPDK_NVMF_MAX_SGL_ENTRIES];
203 		void				*buffers[SPDK_NVMF_MAX_SGL_ENTRIES];
204 	} data;
205 
206 	TAILQ_ENTRY(spdk_nvmf_rdma_request)	link;
207 	TAILQ_ENTRY(spdk_nvmf_rdma_request)	state_link;
208 };
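/*
 * How the embedded work requests are used (see request_transfer_in() and
 * request_transfer_out() below): data.wr is posted as an RDMA READ to pull
 * host-to-controller data or as an RDMA WRITE to push controller-to-host
 * data, with one SGE per buffer taken from the transport's data buffer pool;
 * rsp.wr is the SEND that carries the NVMe completion back to the host.
 */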
209 
210 struct spdk_nvmf_rdma_qpair {
211 	struct spdk_nvmf_qpair			qpair;
212 
213 	struct spdk_nvmf_rdma_port		*port;
214 	struct spdk_nvmf_rdma_poller		*poller;
215 
216 	struct rdma_cm_id			*cm_id;
217 
218 	/* The maximum number of I/Os outstanding on this connection at one time */
219 	uint16_t				max_queue_depth;
220 
221 	/* The maximum number of active RDMA READ and WRITE operations at one time */
222 	uint16_t				max_rw_depth;
223 
224 	/* Receives that are waiting for a request object */
225 	TAILQ_HEAD(, spdk_nvmf_rdma_recv)	incoming_queue;
226 
227 	/* Queues to track the requests in all states */
228 	TAILQ_HEAD(, spdk_nvmf_rdma_request)	state_queue[RDMA_REQUEST_NUM_STATES];
229 
230 	/* Number of requests in each state */
231 	uint32_t				state_cntr[RDMA_REQUEST_NUM_STATES];
232 
233 	int                                     max_sge;
234 
235 	/* Array of size "max_queue_depth" containing RDMA requests. */
236 	struct spdk_nvmf_rdma_request		*reqs;
237 
238 	/* Array of size "max_queue_depth" containing RDMA recvs. */
239 	struct spdk_nvmf_rdma_recv		*recvs;
240 
241 	/* Array of size "max_queue_depth" containing 64 byte capsules
242 	 * used for receive.
243 	 */
244 	union nvmf_h2c_msg			*cmds;
245 	struct ibv_mr				*cmds_mr;
246 
247 	/* Array of size "max_queue_depth" containing 16 byte completions
248 	 * to be sent back to the user.
249 	 */
250 	union nvmf_c2h_msg			*cpls;
251 	struct ibv_mr				*cpls_mr;
252 
253 	/* Array of size "max_queue_depth * InCapsuleDataSize" containing
254 	 * buffers to be used for in capsule data.
255 	 */
256 	void					*bufs;
257 	struct ibv_mr				*bufs_mr;
258 
259 	TAILQ_ENTRY(spdk_nvmf_rdma_qpair)	link;
260 
261 	/* Mgmt channel */
262 	struct spdk_io_channel			*mgmt_channel;
263 	struct spdk_nvmf_rdma_mgmt_channel	*ch;
264 
265 	/* IBV queue pair attributes: they are used to manage
266 	 * qp state and recover from errors.
267 	 */
268 	struct ibv_qp_init_attr			ibv_init_attr;
269 	struct ibv_qp_attr			ibv_attr;
270 
271 	bool					qpair_disconnected;
272 };
273 
274 struct spdk_nvmf_rdma_poller {
275 	struct spdk_nvmf_rdma_device		*device;
276 	struct spdk_nvmf_rdma_poll_group	*group;
277 
278 	struct ibv_cq				*cq;
279 
280 	TAILQ_HEAD(, spdk_nvmf_rdma_qpair)	qpairs;
281 
282 	TAILQ_ENTRY(spdk_nvmf_rdma_poller)	link;
283 };
284 
285 struct spdk_nvmf_rdma_poll_group {
286 	struct spdk_nvmf_transport_poll_group	group;
287 
288 	TAILQ_HEAD(, spdk_nvmf_rdma_poller)	pollers;
289 };
290 
291 /* Assuming rdma_cm uses just one protection domain per ibv_context. */
292 struct spdk_nvmf_rdma_device {
293 	struct ibv_device_attr			attr;
294 	struct ibv_context			*context;
295 
296 	struct spdk_mem_map			*map;
297 	struct ibv_pd				*pd;
298 
299 	TAILQ_ENTRY(spdk_nvmf_rdma_device)	link;
300 };
301 
302 struct spdk_nvmf_rdma_port {
303 	struct spdk_nvme_transport_id		trid;
304 	struct rdma_cm_id			*id;
305 	struct spdk_nvmf_rdma_device		*device;
306 	uint32_t				ref;
307 	TAILQ_ENTRY(spdk_nvmf_rdma_port)	link;
308 };
309 
310 struct spdk_nvmf_rdma_transport {
311 	struct spdk_nvmf_transport	transport;
312 
313 	struct rdma_event_channel	*event_channel;
314 
315 	struct spdk_mempool		*data_buf_pool;
316 
317 	pthread_mutex_t			lock;
318 
319 	/* fields used to poll RDMA/IB events */
320 	nfds_t			npoll_fds;
321 	struct pollfd		*poll_fds;
322 
323 	TAILQ_HEAD(, spdk_nvmf_rdma_device)	devices;
324 	TAILQ_HEAD(, spdk_nvmf_rdma_port)	ports;
325 };
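/*
 * Ownership sketch: the transport owns the lists of RDMA devices and listen
 * ports, and each port references the device it is bound to. A poll group
 * owns its pollers; each poller owns a completion queue and the list of
 * qpairs serviced on it, and each qpair points back at its port and poller.
 */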
326 
327 struct spdk_nvmf_rdma_mgmt_channel {
328 	/* Requests that are waiting to obtain a data buffer */
329 	TAILQ_HEAD(, spdk_nvmf_rdma_request)	pending_data_buf_queue;
330 };
331 
332 /* API to IBV QueuePair */
333 static const char *str_ibv_qp_state[] = {
334 	"IBV_QPS_RESET",
335 	"IBV_QPS_INIT",
336 	"IBV_QPS_RTR",
337 	"IBV_QPS_RTS",
338 	"IBV_QPS_SQD",
339 	"IBV_QPS_SQE",
340 	"IBV_QPS_ERR"
341 };
342 
343 static enum ibv_qp_state
344 spdk_nvmf_rdma_update_ibv_state(struct spdk_nvmf_rdma_qpair *rqpair) {
345 	int rc;
346 
347 	/* All the attributes needed for recovery */
348 	static int spdk_nvmf_ibv_attr_mask =
349 	IBV_QP_STATE |
350 	IBV_QP_PKEY_INDEX |
351 	IBV_QP_PORT |
352 	IBV_QP_ACCESS_FLAGS |
353 	IBV_QP_AV |
354 	IBV_QP_PATH_MTU |
355 	IBV_QP_DEST_QPN |
356 	IBV_QP_RQ_PSN |
357 	IBV_QP_MAX_DEST_RD_ATOMIC |
358 	IBV_QP_MIN_RNR_TIMER |
359 	IBV_QP_SQ_PSN |
360 	IBV_QP_TIMEOUT |
361 	IBV_QP_RETRY_CNT |
362 	IBV_QP_RNR_RETRY |
363 	IBV_QP_MAX_QP_RD_ATOMIC;
364 
365 	rc = ibv_query_qp(rqpair->cm_id->qp, &rqpair->ibv_attr,
366 			  spdk_nvmf_ibv_attr_mask, &rqpair->ibv_init_attr);
367 
368 	if (rc)
369 	{
370 		SPDK_ERRLOG("Failed to get updated RDMA queue pair state!\n");
371 		assert(false);
372 	}
373 
374 	return rqpair->ibv_attr.qp_state;
375 }
376 
377 static int
378 spdk_nvmf_rdma_set_ibv_state(struct spdk_nvmf_rdma_qpair *rqpair,
379 			     enum ibv_qp_state new_state)
380 {
381 	int rc;
382 	enum ibv_qp_state state;
383 	static int attr_mask_rc[] = {
384 		[IBV_QPS_RESET] = IBV_QP_STATE,
385 		[IBV_QPS_INIT] = (IBV_QP_STATE |
386 				  IBV_QP_PKEY_INDEX |
387 				  IBV_QP_PORT |
388 				  IBV_QP_ACCESS_FLAGS),
389 		[IBV_QPS_RTR] = (IBV_QP_STATE |
390 				 IBV_QP_AV |
391 				 IBV_QP_PATH_MTU |
392 				 IBV_QP_DEST_QPN |
393 				 IBV_QP_RQ_PSN |
394 				 IBV_QP_MAX_DEST_RD_ATOMIC |
395 				 IBV_QP_MIN_RNR_TIMER),
396 		[IBV_QPS_RTS] = (IBV_QP_STATE |
397 				 IBV_QP_SQ_PSN |
398 				 IBV_QP_TIMEOUT |
399 				 IBV_QP_RETRY_CNT |
400 				 IBV_QP_RNR_RETRY |
401 				 IBV_QP_MAX_QP_RD_ATOMIC),
402 		[IBV_QPS_SQD] = IBV_QP_STATE,
403 		[IBV_QPS_SQE] = IBV_QP_STATE,
404 		[IBV_QPS_ERR] = IBV_QP_STATE,
405 	};
406 
407 	switch (new_state) {
408 	case IBV_QPS_RESET:
409 	case IBV_QPS_INIT:
410 	case IBV_QPS_RTR:
411 	case IBV_QPS_RTS:
412 	case IBV_QPS_SQD:
413 	case IBV_QPS_SQE:
414 	case IBV_QPS_ERR:
415 		break;
416 	default:
417 		SPDK_ERRLOG("QP#%d: bad state requested: %u\n",
418 			    rqpair->qpair.qid, new_state);
419 		return -1;
420 	}
421 	rqpair->ibv_attr.cur_qp_state = rqpair->ibv_attr.qp_state;
422 	rqpair->ibv_attr.qp_state = new_state;
423 	rqpair->ibv_attr.ah_attr.port_num = rqpair->ibv_attr.port_num;
424 
425 	rc = ibv_modify_qp(rqpair->cm_id->qp, &rqpair->ibv_attr,
426 			   attr_mask_rc[new_state]);
427 
428 	if (rc) {
429 		SPDK_ERRLOG("QP#%d: failed to set state to: %s, %d (%s)\n",
430 			    rqpair->qpair.qid, str_ibv_qp_state[new_state], errno, strerror(errno));
431 		return rc;
432 	}
433 
434 	state = spdk_nvmf_rdma_update_ibv_state(rqpair);
435 
436 	if (state != new_state) {
437 		SPDK_ERRLOG("QP#%d: expected state: %s, actual state: %s\n",
438 			    rqpair->qpair.qid, str_ibv_qp_state[new_state],
439 			    str_ibv_qp_state[state]);
440 		return -1;
441 	}
442 	SPDK_NOTICELOG("IBV QP#%u changed to: %s\n", rqpair->qpair.qid,
443 		       str_ibv_qp_state[state]);
444 	return 0;
445 }
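/*
 * Hypothetical usage sketch: forcing a queue pair into the error state, so
 * that its outstanding work requests complete with a flush error, only needs
 * IBV_QP_STATE in the attribute mask per the attr_mask_rc table above:
 *
 *	if (spdk_nvmf_rdma_set_ibv_state(rqpair, IBV_QPS_ERR) != 0) {
 *		SPDK_ERRLOG("Failed to force the QP into the error state\n");
 *	}
 */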
446 
447 static void
448 spdk_nvmf_rdma_request_set_state(struct spdk_nvmf_rdma_request *rdma_req,
449 				 enum spdk_nvmf_rdma_request_state state)
450 {
451 	struct spdk_nvmf_qpair		*qpair;
452 	struct spdk_nvmf_rdma_qpair	*rqpair;
453 
454 	qpair = rdma_req->req.qpair;
455 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
456 
457 	TAILQ_REMOVE(&rqpair->state_queue[rdma_req->state], rdma_req, state_link);
458 	rqpair->state_cntr[rdma_req->state]--;
459 
460 	rdma_req->state = state;
461 
462 	TAILQ_INSERT_TAIL(&rqpair->state_queue[rdma_req->state], rdma_req, state_link);
463 	rqpair->state_cntr[rdma_req->state]++;
464 }
465 
466 static int
467 spdk_nvmf_rdma_mgmt_channel_create(void *io_device, void *ctx_buf)
468 {
469 	struct spdk_nvmf_rdma_mgmt_channel *ch = ctx_buf;
470 
471 	TAILQ_INIT(&ch->pending_data_buf_queue);
472 	return 0;
473 }
474 
475 static void
476 spdk_nvmf_rdma_mgmt_channel_destroy(void *io_device, void *ctx_buf)
477 {
478 	struct spdk_nvmf_rdma_mgmt_channel *ch = ctx_buf;
479 
480 	if (!TAILQ_EMPTY(&ch->pending_data_buf_queue)) {
481 		SPDK_ERRLOG("Pending I/O list wasn't empty on channel destruction\n");
482 	}
483 }
484 
485 static int
486 spdk_nvmf_rdma_cur_rw_depth(struct spdk_nvmf_rdma_qpair *rqpair)
487 {
488 	return rqpair->state_cntr[RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER] +
489 	       rqpair->state_cntr[RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST];
490 }
491 
492 static int
493 spdk_nvmf_rdma_cur_queue_depth(struct spdk_nvmf_rdma_qpair *rqpair)
494 {
495 	return rqpair->max_queue_depth -
496 	       rqpair->state_cntr[RDMA_REQUEST_STATE_FREE];
497 }
498 
499 static void
500 spdk_nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rqpair)
501 {
502 	if (spdk_nvmf_rdma_cur_queue_depth(rqpair)) {
503 		rqpair->qpair_disconnected = true;
504 		return;
505 	}
506 
507 	if (rqpair->poller) {
508 		TAILQ_REMOVE(&rqpair->poller->qpairs, rqpair, link);
509 	}
510 
511 	if (rqpair->cmds_mr) {
512 		ibv_dereg_mr(rqpair->cmds_mr);
513 	}
514 
515 	if (rqpair->cpls_mr) {
516 		ibv_dereg_mr(rqpair->cpls_mr);
517 	}
518 
519 	if (rqpair->bufs_mr) {
520 		ibv_dereg_mr(rqpair->bufs_mr);
521 	}
522 
523 	if (rqpair->cm_id) {
524 		rdma_destroy_qp(rqpair->cm_id);
525 		rdma_destroy_id(rqpair->cm_id);
526 	}
527 
528 	if (rqpair->mgmt_channel) {
529 		spdk_put_io_channel(rqpair->mgmt_channel);
530 	}
531 
532 	/* Free all memory */
533 	spdk_dma_free(rqpair->cmds);
534 	spdk_dma_free(rqpair->cpls);
535 	spdk_dma_free(rqpair->bufs);
536 	free(rqpair->reqs);
537 	free(rqpair->recvs);
538 	free(rqpair);
539 }
540 
541 static int
542 spdk_nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
543 {
544 	struct spdk_nvmf_rdma_transport *rtransport;
545 	struct spdk_nvmf_rdma_qpair	*rqpair;
546 	int				rc, i;
547 	struct spdk_nvmf_rdma_recv	*rdma_recv;
548 	struct spdk_nvmf_rdma_request	*rdma_req;
549 	struct spdk_nvmf_transport      *transport;
550 
551 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
552 	rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport);
553 	transport = &rtransport->transport;
554 
555 	memset(&rqpair->ibv_init_attr, 0, sizeof(struct ibv_qp_init_attr));
556 	rqpair->ibv_init_attr.qp_context	= rqpair;
557 	rqpair->ibv_init_attr.qp_type		= IBV_QPT_RC;
558 	rqpair->ibv_init_attr.send_cq		= rqpair->poller->cq;
559 	rqpair->ibv_init_attr.recv_cq		= rqpair->poller->cq;
560 	rqpair->ibv_init_attr.cap.max_send_wr	= rqpair->max_queue_depth *
561 			2; /* one SEND plus one READ or WRITE per request */
562 	rqpair->ibv_init_attr.cap.max_recv_wr	= rqpair->max_queue_depth; /* RECV operations */
563 	rqpair->ibv_init_attr.cap.max_send_sge	= rqpair->max_sge;
564 	rqpair->ibv_init_attr.cap.max_recv_sge	= NVMF_DEFAULT_RX_SGE;
565 
566 	rc = rdma_create_qp(rqpair->cm_id, rqpair->port->device->pd, &rqpair->ibv_init_attr);
567 	if (rc) {
568 		SPDK_ERRLOG("rdma_create_qp failed: errno %d: %s\n", errno, spdk_strerror(errno));
569 		rdma_destroy_id(rqpair->cm_id);
570 		rqpair->cm_id = NULL;
571 		spdk_nvmf_rdma_qpair_destroy(rqpair);
572 		return -1;
573 	}
574 
575 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "New RDMA Connection: %p\n", qpair);
576 
577 	rqpair->reqs = calloc(rqpair->max_queue_depth, sizeof(*rqpair->reqs));
578 	rqpair->recvs = calloc(rqpair->max_queue_depth, sizeof(*rqpair->recvs));
579 	rqpair->cmds = spdk_dma_zmalloc(rqpair->max_queue_depth * sizeof(*rqpair->cmds),
580 					0x1000, NULL);
581 	rqpair->cpls = spdk_dma_zmalloc(rqpair->max_queue_depth * sizeof(*rqpair->cpls),
582 					0x1000, NULL);
583 
584 
585 	if (transport->opts.in_capsule_data_size > 0) {
586 		rqpair->bufs = spdk_dma_zmalloc(rqpair->max_queue_depth *
587 						transport->opts.in_capsule_data_size,
588 						0x1000, NULL);
589 	}
590 
591 	if (!rqpair->reqs || !rqpair->recvs || !rqpair->cmds ||
592 	    !rqpair->cpls || (transport->opts.in_capsule_data_size && !rqpair->bufs)) {
593 		SPDK_ERRLOG("Unable to allocate sufficient memory for RDMA queue.\n");
594 		spdk_nvmf_rdma_qpair_destroy(rqpair);
595 		return -1;
596 	}
597 
598 	rqpair->cmds_mr = ibv_reg_mr(rqpair->cm_id->pd, rqpair->cmds,
599 				     rqpair->max_queue_depth * sizeof(*rqpair->cmds),
600 				     IBV_ACCESS_LOCAL_WRITE);
601 	rqpair->cpls_mr = ibv_reg_mr(rqpair->cm_id->pd, rqpair->cpls,
602 				     rqpair->max_queue_depth * sizeof(*rqpair->cpls),
603 				     0);
604 
605 	if (transport->opts.in_capsule_data_size) {
606 		rqpair->bufs_mr = ibv_reg_mr(rqpair->cm_id->pd, rqpair->bufs,
607 					     rqpair->max_queue_depth *
608 					     transport->opts.in_capsule_data_size,
609 					     IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
610 	}
611 
612 	if (!rqpair->cmds_mr || !rqpair->cpls_mr || (transport->opts.in_capsule_data_size &&
613 			!rqpair->bufs_mr)) {
614 		SPDK_ERRLOG("Unable to register required memory for RDMA queue.\n");
615 		spdk_nvmf_rdma_qpair_destroy(rqpair);
616 		return -1;
617 	}
618 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Command Array: %p Length: %lx LKey: %x\n",
619 		      rqpair->cmds, rqpair->max_queue_depth * sizeof(*rqpair->cmds), rqpair->cmds_mr->lkey);
620 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Completion Array: %p Length: %lx LKey: %x\n",
621 		      rqpair->cpls, rqpair->max_queue_depth * sizeof(*rqpair->cpls), rqpair->cpls_mr->lkey);
622 	if (rqpair->bufs && rqpair->bufs_mr) {
623 		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "In Capsule Data Array: %p Length: %x LKey: %x\n",
624 			      rqpair->bufs, rqpair->max_queue_depth *
625 			      transport->opts.in_capsule_data_size, rqpair->bufs_mr->lkey);
626 	}
627 
628 	/* Initialize the request state queues and counters of the queue pair */
629 	for (i = RDMA_REQUEST_STATE_FREE; i < RDMA_REQUEST_NUM_STATES; i++) {
630 		TAILQ_INIT(&rqpair->state_queue[i]);
631 		rqpair->state_cntr[i] = 0;
632 	}
633 
634 	for (i = 0; i < rqpair->max_queue_depth; i++) {
635 		struct ibv_recv_wr *bad_wr = NULL;
636 
637 		rdma_recv = &rqpair->recvs[i];
638 		rdma_recv->qpair = rqpair;
639 
640 		/* Set up memory to receive commands */
641 		if (rqpair->bufs) {
642 			rdma_recv->buf = (void *)((uintptr_t)rqpair->bufs + (i *
643 						  transport->opts.in_capsule_data_size));
644 		}
645 
646 		rdma_recv->sgl[0].addr = (uintptr_t)&rqpair->cmds[i];
647 		rdma_recv->sgl[0].length = sizeof(rqpair->cmds[i]);
648 		rdma_recv->sgl[0].lkey = rqpair->cmds_mr->lkey;
649 		rdma_recv->wr.num_sge = 1;
650 
651 		if (rdma_recv->buf && rqpair->bufs_mr) {
652 			rdma_recv->sgl[1].addr = (uintptr_t)rdma_recv->buf;
653 			rdma_recv->sgl[1].length = transport->opts.in_capsule_data_size;
654 			rdma_recv->sgl[1].lkey = rqpair->bufs_mr->lkey;
655 			rdma_recv->wr.num_sge++;
656 		}
657 
658 		rdma_recv->wr.wr_id = (uintptr_t)rdma_recv;
659 		rdma_recv->wr.sg_list = rdma_recv->sgl;
660 
661 		rc = ibv_post_recv(rqpair->cm_id->qp, &rdma_recv->wr, &bad_wr);
662 		if (rc) {
663 			SPDK_ERRLOG("Unable to post capsule for RDMA RECV\n");
664 			spdk_nvmf_rdma_qpair_destroy(rqpair);
665 			return -1;
666 		}
667 	}
668 
669 	for (i = 0; i < rqpair->max_queue_depth; i++) {
670 		rdma_req = &rqpair->reqs[i];
671 
672 		rdma_req->req.qpair = &rqpair->qpair;
673 		rdma_req->req.cmd = NULL;
674 
675 		/* Set up memory to send responses */
676 		rdma_req->req.rsp = &rqpair->cpls[i];
677 
678 		rdma_req->rsp.sgl[0].addr = (uintptr_t)&rqpair->cpls[i];
679 		rdma_req->rsp.sgl[0].length = sizeof(rqpair->cpls[i]);
680 		rdma_req->rsp.sgl[0].lkey = rqpair->cpls_mr->lkey;
681 
682 		rdma_req->rsp.wr.wr_id = (uintptr_t)rdma_req;
683 		rdma_req->rsp.wr.next = NULL;
684 		rdma_req->rsp.wr.opcode = IBV_WR_SEND;
685 		rdma_req->rsp.wr.send_flags = IBV_SEND_SIGNALED;
686 		rdma_req->rsp.wr.sg_list = rdma_req->rsp.sgl;
687 		rdma_req->rsp.wr.num_sge = SPDK_COUNTOF(rdma_req->rsp.sgl);
688 
689 		/* Set up memory for data buffers */
690 		rdma_req->data.wr.wr_id = (uint64_t)rdma_req;
691 		rdma_req->data.wr.next = NULL;
692 		rdma_req->data.wr.send_flags = IBV_SEND_SIGNALED;
693 		rdma_req->data.wr.sg_list = rdma_req->data.sgl;
694 		rdma_req->data.wr.num_sge = SPDK_COUNTOF(rdma_req->data.sgl);
695 
696 		/* Initialize request state to FREE */
697 		rdma_req->state = RDMA_REQUEST_STATE_FREE;
698 		TAILQ_INSERT_TAIL(&rqpair->state_queue[rdma_req->state], rdma_req, state_link);
699 		rqpair->state_cntr[rdma_req->state]++;
700 	}
701 
702 	return 0;
703 }
704 
705 static int
706 request_transfer_in(struct spdk_nvmf_request *req)
707 {
708 	int				rc;
709 	struct spdk_nvmf_rdma_request	*rdma_req;
710 	struct spdk_nvmf_qpair		*qpair;
711 	struct spdk_nvmf_rdma_qpair	*rqpair;
712 	struct ibv_send_wr		*bad_wr = NULL;
713 
714 	qpair = req->qpair;
715 	rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
716 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
717 
718 	assert(req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
719 
720 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "RDMA READ POSTED. Request: %p Connection: %p\n", req, qpair);
721 
722 	rdma_req->data.wr.opcode = IBV_WR_RDMA_READ;
723 	rdma_req->data.wr.next = NULL;
724 	rc = ibv_post_send(rqpair->cm_id->qp, &rdma_req->data.wr, &bad_wr);
725 	if (rc) {
726 		SPDK_ERRLOG("Unable to transfer data from host to target\n");
727 		return -1;
728 	}
729 	return 0;
730 }
731 
732 static int
733 request_transfer_out(struct spdk_nvmf_request *req, int *data_posted)
734 {
735 	int				rc;
736 	struct spdk_nvmf_rdma_request	*rdma_req;
737 	struct spdk_nvmf_qpair		*qpair;
738 	struct spdk_nvmf_rdma_qpair	*rqpair;
739 	struct spdk_nvme_cpl		*rsp;
740 	struct ibv_recv_wr		*bad_recv_wr = NULL;
741 	struct ibv_send_wr		*send_wr, *bad_send_wr = NULL;
742 
743 	*data_posted = 0;
744 	qpair = req->qpair;
745 	rsp = &req->rsp->nvme_cpl;
746 	rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
747 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
748 
749 	/* Advance our sq_head pointer */
750 	if (qpair->sq_head == qpair->sq_head_max) {
751 		qpair->sq_head = 0;
752 	} else {
753 		qpair->sq_head++;
754 	}
755 	rsp->sqhd = qpair->sq_head;
756 
757 	/* Post the capsule to the recv buffer */
758 	assert(rdma_req->recv != NULL);
759 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "RDMA RECV POSTED. Recv: %p Connection: %p\n", rdma_req->recv,
760 		      rqpair);
761 	rc = ibv_post_recv(rqpair->cm_id->qp, &rdma_req->recv->wr, &bad_recv_wr);
762 	if (rc) {
763 		SPDK_ERRLOG("Unable to re-post rx descriptor\n");
764 		return rc;
765 	}
766 	rdma_req->recv = NULL;
767 
768 	/* Build the response which consists of an optional
769 	 * RDMA WRITE to transfer data, plus an RDMA SEND
770 	 * containing the response.
771 	 */
772 	send_wr = &rdma_req->rsp.wr;
773 
774 	if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
775 	    req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
776 		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "RDMA WRITE POSTED. Request: %p Connection: %p\n", req, qpair);
777 
778 		rdma_req->data.wr.opcode = IBV_WR_RDMA_WRITE;
779 
780 		rdma_req->data.wr.next = send_wr;
781 		*data_posted = 1;
782 		send_wr = &rdma_req->data.wr;
783 	}
784 
785 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "RDMA SEND POSTED. Request: %p Connection: %p\n", req, qpair);
786 
787 	/* Send the completion */
788 	rc = ibv_post_send(rqpair->cm_id->qp, send_wr, &bad_send_wr);
789 	if (rc) {
790 		SPDK_ERRLOG("Unable to send response capsule\n");
791 	}
792 
793 	return rc;
794 }
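/*
 * Note on ordering: when data must be written back to the host,
 * request_transfer_out() chains the RDMA WRITE ahead of the SEND
 * (rdma_req->data.wr.next = &rdma_req->rsp.wr) and posts both with a single
 * ibv_post_send(). On a reliably-connected queue pair the WRITE data is
 * placed in the host's memory before the receive completion for the
 * subsequent SEND is generated, so the host never observes the NVMe
 * completion ahead of its data.
 */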
795 
796 static int
797 spdk_nvmf_rdma_event_accept(struct rdma_cm_id *id, struct spdk_nvmf_rdma_qpair *rqpair)
798 {
799 	struct spdk_nvmf_rdma_accept_private_data	accept_data;
800 	struct rdma_conn_param				ctrlr_event_data = {};
801 	int						rc;
802 
803 	accept_data.recfmt = 0;
804 	accept_data.crqsize = rqpair->max_queue_depth;
805 
806 	ctrlr_event_data.private_data = &accept_data;
807 	ctrlr_event_data.private_data_len = sizeof(accept_data);
808 	if (id->ps == RDMA_PS_TCP) {
809 		ctrlr_event_data.responder_resources = 0; /* We accept 0 reads from the host */
810 		ctrlr_event_data.initiator_depth = rqpair->max_rw_depth;
811 	}
812 
813 	rc = rdma_accept(id, &ctrlr_event_data);
814 	if (rc) {
815 		SPDK_ERRLOG("Error %d on rdma_accept\n", errno);
816 	} else {
817 		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Sent back the accept\n");
818 	}
819 
820 	return rc;
821 }
822 
823 static void
824 spdk_nvmf_rdma_event_reject(struct rdma_cm_id *id, enum spdk_nvmf_rdma_transport_error error)
825 {
826 	struct spdk_nvmf_rdma_reject_private_data	rej_data;
827 
828 	rej_data.recfmt = 0;
829 	rej_data.sts = error;
830 
831 	rdma_reject(id, &rej_data, sizeof(rej_data));
832 }
833 
834 static int
835 nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *event,
836 		  new_qpair_fn cb_fn)
837 {
838 	struct spdk_nvmf_rdma_transport *rtransport;
839 	struct spdk_nvmf_rdma_qpair	*rqpair = NULL;
840 	struct spdk_nvmf_rdma_port	*port;
841 	struct rdma_conn_param		*rdma_param = NULL;
842 	const struct spdk_nvmf_rdma_request_private_data *private_data = NULL;
843 	uint16_t			max_queue_depth;
844 	uint16_t			max_rw_depth;
845 
846 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
847 
848 	assert(event->id != NULL); /* Impossible. Can't even reject the connection. */
849 	assert(event->id->verbs != NULL); /* Impossible. No way to handle this. */
850 
851 	rdma_param = &event->param.conn;
852 	if (rdma_param->private_data == NULL ||
853 	    rdma_param->private_data_len < sizeof(struct spdk_nvmf_rdma_request_private_data)) {
854 		SPDK_ERRLOG("connect request: no private data provided\n");
855 		spdk_nvmf_rdma_event_reject(event->id, SPDK_NVMF_RDMA_ERROR_INVALID_PRIVATE_DATA_LENGTH);
856 		return -1;
857 	}
858 
859 	private_data = rdma_param->private_data;
860 	if (private_data->recfmt != 0) {
861 		SPDK_ERRLOG("Received RDMA private data with RECFMT != 0\n");
862 		spdk_nvmf_rdma_event_reject(event->id, SPDK_NVMF_RDMA_ERROR_INVALID_RECFMT);
863 		return -1;
864 	}
865 
866 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Connect Recv on fabric intf name %s, dev_name %s\n",
867 		      event->id->verbs->device->name, event->id->verbs->device->dev_name);
868 
869 	port = event->listen_id->context;
870 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Listen Id was %p with verbs %p. ListenAddr: %p\n",
871 		      event->listen_id, event->listen_id->verbs, port);
872 
873 	/* Figure out the supported queue depth. This is a multi-step process
874 	 * that takes into account hardware maximums, host provided values,
875 	 * and our target's internal memory limits */
876 
877 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Calculating Queue Depth\n");
878 
879 	/* Start with the maximum queue depth allowed by the target */
880 	max_queue_depth = rtransport->transport.opts.max_queue_depth;
881 	max_rw_depth = rtransport->transport.opts.max_queue_depth;
882 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Target Max Queue Depth: %d\n",
883 		      rtransport->transport.opts.max_queue_depth);
884 
885 	/* Next check the local NIC's hardware limitations */
886 	SPDK_DEBUGLOG(SPDK_LOG_RDMA,
887 		      "Local NIC Max Send/Recv Queue Depth: %d Max Read/Write Queue Depth: %d\n",
888 		      port->device->attr.max_qp_wr, port->device->attr.max_qp_rd_atom);
889 	max_queue_depth = spdk_min(max_queue_depth, port->device->attr.max_qp_wr);
890 	max_rw_depth = spdk_min(max_rw_depth, port->device->attr.max_qp_rd_atom);
891 
892 	/* Next check the remote NIC's hardware limitations */
893 	SPDK_DEBUGLOG(SPDK_LOG_RDMA,
894 		      "Host (Initiator) NIC Max Incoming RDMA R/W operations: %d Max Outgoing RDMA R/W operations: %d\n",
895 		      rdma_param->initiator_depth, rdma_param->responder_resources);
896 	if (rdma_param->initiator_depth > 0) {
897 		max_rw_depth = spdk_min(max_rw_depth, rdma_param->initiator_depth);
898 	}
899 
900 	/* Finally check for the host software requested values, which are
901 	 * optional. */
902 	if (rdma_param->private_data != NULL &&
903 	    rdma_param->private_data_len >= sizeof(struct spdk_nvmf_rdma_request_private_data)) {
904 		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Host Receive Queue Size: %d\n", private_data->hrqsize);
905 		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Host Send Queue Size: %d\n", private_data->hsqsize);
906 		max_queue_depth = spdk_min(max_queue_depth, private_data->hrqsize);
907 		max_queue_depth = spdk_min(max_queue_depth, private_data->hsqsize + 1);
908 	}
909 
910 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Final Negotiated Queue Depth: %d R/W Depth: %d\n",
911 		      max_queue_depth, max_rw_depth);
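	/* Worked example with illustrative numbers: if the target allows a
	 * depth of 128, the local NIC reports max_qp_wr = 16384 and
	 * max_qp_rd_atom = 16, and the host advertises hrqsize = 128 and
	 * hsqsize = 127, then max_queue_depth = min(128, 16384, 128, 127 + 1)
	 * = 128 and max_rw_depth = min(128, 16, initiator_depth).
	 */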
912 
913 	rqpair = calloc(1, sizeof(struct spdk_nvmf_rdma_qpair));
914 	if (rqpair == NULL) {
915 		SPDK_ERRLOG("Could not allocate new connection.\n");
916 		spdk_nvmf_rdma_event_reject(event->id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES);
917 		return -1;
918 	}
919 
920 	rqpair->port = port;
921 	rqpair->max_queue_depth = max_queue_depth;
922 	rqpair->max_rw_depth = max_rw_depth;
923 	rqpair->cm_id = event->id;
924 	rqpair->qpair.transport = transport;
925 	rqpair->max_sge = spdk_min(port->device->attr.max_sge, SPDK_NVMF_MAX_SGL_ENTRIES);
926 	TAILQ_INIT(&rqpair->incoming_queue);
927 	event->id->context = &rqpair->qpair;
928 
929 	cb_fn(&rqpair->qpair);
930 
931 	return 0;
932 }
933 
934 static int
935 nvmf_rdma_disconnect(struct rdma_cm_event *evt)
936 {
937 	struct spdk_nvmf_qpair		*qpair;
938 	struct spdk_nvmf_rdma_qpair	*rqpair;
939 
940 	if (evt->id == NULL) {
941 		SPDK_ERRLOG("disconnect request: missing cm_id\n");
942 		return -1;
943 	}
944 
945 	qpair = evt->id->context;
946 	if (qpair == NULL) {
947 		SPDK_ERRLOG("disconnect request: no active connection\n");
948 		return -1;
949 	}
950 
951 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
952 	spdk_nvmf_rdma_update_ibv_state(rqpair);
953 
954 	spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
955 
956 	return 0;
957 }
958 
959 #ifdef DEBUG
960 static const char *CM_EVENT_STR[] = {
961 	"RDMA_CM_EVENT_ADDR_RESOLVED",
962 	"RDMA_CM_EVENT_ADDR_ERROR",
963 	"RDMA_CM_EVENT_ROUTE_RESOLVED",
964 	"RDMA_CM_EVENT_ROUTE_ERROR",
965 	"RDMA_CM_EVENT_CONNECT_REQUEST",
966 	"RDMA_CM_EVENT_CONNECT_RESPONSE",
967 	"RDMA_CM_EVENT_CONNECT_ERROR",
968 	"RDMA_CM_EVENT_UNREACHABLE",
969 	"RDMA_CM_EVENT_REJECTED",
970 	"RDMA_CM_EVENT_ESTABLISHED",
971 	"RDMA_CM_EVENT_DISCONNECTED",
972 	"RDMA_CM_EVENT_DEVICE_REMOVAL",
973 	"RDMA_CM_EVENT_MULTICAST_JOIN",
974 	"RDMA_CM_EVENT_MULTICAST_ERROR",
975 	"RDMA_CM_EVENT_ADDR_CHANGE",
976 	"RDMA_CM_EVENT_TIMEWAIT_EXIT"
977 };
978 #endif /* DEBUG */
979 
980 static void
981 spdk_nvmf_process_cm_event(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn)
982 {
983 	struct spdk_nvmf_rdma_transport *rtransport;
984 	struct rdma_cm_event		*event;
985 	int				rc;
986 
987 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
988 
989 	if (rtransport->event_channel == NULL) {
990 		return;
991 	}
992 
993 	while (1) {
994 		rc = rdma_get_cm_event(rtransport->event_channel, &event);
995 		if (rc == 0) {
996 			SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Acceptor Event: %s\n", CM_EVENT_STR[event->event]);
997 
998 			switch (event->event) {
999 			case RDMA_CM_EVENT_ADDR_RESOLVED:
1000 			case RDMA_CM_EVENT_ADDR_ERROR:
1001 			case RDMA_CM_EVENT_ROUTE_RESOLVED:
1002 			case RDMA_CM_EVENT_ROUTE_ERROR:
1003 				/* No action required. The target never attempts to resolve routes. */
1004 				break;
1005 			case RDMA_CM_EVENT_CONNECT_REQUEST:
1006 				rc = nvmf_rdma_connect(transport, event, cb_fn);
1007 				if (rc < 0) {
1008 					SPDK_ERRLOG("Unable to process connect event. rc: %d\n", rc);
1009 					break;
1010 				}
1011 				break;
1012 			case RDMA_CM_EVENT_CONNECT_RESPONSE:
1013 				/* The target never initiates a new connection. So this will not occur. */
1014 				break;
1015 			case RDMA_CM_EVENT_CONNECT_ERROR:
1016 				/* Can this happen? The docs say it can, but not sure what causes it. */
1017 				break;
1018 			case RDMA_CM_EVENT_UNREACHABLE:
1019 			case RDMA_CM_EVENT_REJECTED:
1020 				/* These only occur on the client side. */
1021 				break;
1022 			case RDMA_CM_EVENT_ESTABLISHED:
1023 				/* TODO: Should we be waiting for this event anywhere? */
1024 				break;
1025 			case RDMA_CM_EVENT_DISCONNECTED:
1026 			case RDMA_CM_EVENT_DEVICE_REMOVAL:
1027 				rc = nvmf_rdma_disconnect(event);
1028 				if (rc < 0) {
1029 					SPDK_ERRLOG("Unable to process disconnect event. rc: %d\n", rc);
1030 					break;
1031 				}
1032 				break;
1033 			case RDMA_CM_EVENT_MULTICAST_JOIN:
1034 			case RDMA_CM_EVENT_MULTICAST_ERROR:
1035 				/* Multicast is not used */
1036 				break;
1037 			case RDMA_CM_EVENT_ADDR_CHANGE:
1038 				/* Not utilizing this event */
1039 				break;
1040 			case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1041 				/* For now, do nothing. The target never re-uses queue pairs. */
1042 				break;
1043 			default:
1044 				SPDK_ERRLOG("Unexpected Acceptor Event [%d]\n", event->event);
1045 				break;
1046 			}
1047 
1048 			rdma_ack_cm_event(event);
1049 		} else {
1050 			if (errno != EAGAIN && errno != EWOULDBLOCK) {
1051 				SPDK_ERRLOG("Acceptor Event Error: %s\n", spdk_strerror(errno));
1052 			}
1053 			break;
1054 		}
1055 	}
1056 }
1057 
1058 static int
1059 spdk_nvmf_rdma_mem_notify(void *cb_ctx, struct spdk_mem_map *map,
1060 			  enum spdk_mem_map_notify_action action,
1061 			  void *vaddr, size_t size)
1062 {
1063 	struct spdk_nvmf_rdma_device *device = cb_ctx;
1064 	struct ibv_pd *pd = device->pd;
1065 	struct ibv_mr *mr;
1066 
1067 	switch (action) {
1068 	case SPDK_MEM_MAP_NOTIFY_REGISTER:
1069 		mr = ibv_reg_mr(pd, vaddr, size,
1070 				IBV_ACCESS_LOCAL_WRITE |
1071 				IBV_ACCESS_REMOTE_READ |
1072 				IBV_ACCESS_REMOTE_WRITE);
1073 		if (mr == NULL) {
1074 			SPDK_ERRLOG("ibv_reg_mr() failed\n");
1075 			return -1;
1076 		} else {
1077 			spdk_mem_map_set_translation(map, (uint64_t)vaddr, size, (uint64_t)mr);
1078 		}
1079 		break;
1080 	case SPDK_MEM_MAP_NOTIFY_UNREGISTER:
1081 		mr = (struct ibv_mr *)spdk_mem_map_translate(map, (uint64_t)vaddr, size);
1082 		spdk_mem_map_clear_translation(map, (uint64_t)vaddr, size);
1083 		if (mr) {
1084 			ibv_dereg_mr(mr);
1085 		}
1086 		break;
1087 	}
1088 
1089 	return 0;
1090 }
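/*
 * The translations registered above are consumed when building SGEs:
 * spdk_nvmf_rdma_request_fill_iovs() translates each data buffer back to its
 * ibv_mr via spdk_mem_map_translate() and uses that mr's lkey in the work
 * request, so every buffer handed to the NIC must lie inside a region that
 * was registered through this callback.
 */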
1091 
1092 typedef enum spdk_nvme_data_transfer spdk_nvme_data_transfer_t;
1093 
1094 static spdk_nvme_data_transfer_t
1095 spdk_nvmf_rdma_request_get_xfer(struct spdk_nvmf_rdma_request *rdma_req)
1096 {
1097 	enum spdk_nvme_data_transfer xfer;
1098 	struct spdk_nvme_cmd *cmd = &rdma_req->req.cmd->nvme_cmd;
1099 	struct spdk_nvme_sgl_descriptor *sgl = &cmd->dptr.sgl1;
1100 
1101 #ifdef SPDK_CONFIG_RDMA_SEND_WITH_INVAL
1102 	rdma_req->rsp.wr.opcode = IBV_WR_SEND;
1103 	rdma_req->rsp.wr.imm_data = 0;
1104 #endif
1105 
1106 	/* Figure out data transfer direction */
1107 	if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
1108 		xfer = spdk_nvme_opc_get_data_transfer(rdma_req->req.cmd->nvmf_cmd.fctype);
1109 	} else {
1110 		xfer = spdk_nvme_opc_get_data_transfer(cmd->opc);
1111 
1112 		/* Some admin commands are special cases */
1113 		if ((rdma_req->req.qpair->qid == 0) &&
1114 		    ((cmd->opc == SPDK_NVME_OPC_GET_FEATURES) ||
1115 		     (cmd->opc == SPDK_NVME_OPC_SET_FEATURES))) {
1116 			switch (cmd->cdw10 & 0xff) {
1117 			case SPDK_NVME_FEAT_LBA_RANGE_TYPE:
1118 			case SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION:
1119 			case SPDK_NVME_FEAT_HOST_IDENTIFIER:
1120 				break;
1121 			default:
1122 				xfer = SPDK_NVME_DATA_NONE;
1123 			}
1124 		}
1125 	}
1126 
1127 	if (xfer == SPDK_NVME_DATA_NONE) {
1128 		return xfer;
1129 	}
1130 
1131 	/* Even for commands that may transfer data, they could have specified 0 length.
1132 	 * We want those to show up with xfer SPDK_NVME_DATA_NONE.
1133 	 */
1134 	switch (sgl->generic.type) {
1135 	case SPDK_NVME_SGL_TYPE_DATA_BLOCK:
1136 	case SPDK_NVME_SGL_TYPE_BIT_BUCKET:
1137 	case SPDK_NVME_SGL_TYPE_SEGMENT:
1138 	case SPDK_NVME_SGL_TYPE_LAST_SEGMENT:
1139 	case SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK:
1140 		if (sgl->unkeyed.length == 0) {
1141 			xfer = SPDK_NVME_DATA_NONE;
1142 		}
1143 		break;
1144 	case SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK:
1145 		if (sgl->keyed.length == 0) {
1146 			xfer = SPDK_NVME_DATA_NONE;
1147 		}
1148 		break;
1149 	}
1150 
1151 	return xfer;
1152 }
1153 
1154 static int
1155 spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
1156 				 struct spdk_nvmf_rdma_device *device,
1157 				 struct spdk_nvmf_rdma_request *rdma_req)
1158 {
1159 	void		*buf = NULL;
1160 	uint32_t	length = rdma_req->req.length;
1161 	uint32_t	i = 0;
1162 
1163 	rdma_req->req.iovcnt = 0;
1164 	while (length) {
1165 		buf = spdk_mempool_get(rtransport->data_buf_pool);
1166 		if (!buf) {
1167 			goto nomem;
1168 		}
1169 
1170 		rdma_req->req.iov[i].iov_base = (void *)((uintptr_t)(buf + NVMF_DATA_BUFFER_MASK) &
1171 						~NVMF_DATA_BUFFER_MASK);
1172 		rdma_req->req.iov[i].iov_len  = spdk_min(length, rtransport->transport.opts.io_unit_size);
1173 		rdma_req->req.iovcnt++;
1174 		rdma_req->data.buffers[i] = buf;
1175 		rdma_req->data.wr.sg_list[i].addr = (uintptr_t)(rdma_req->req.iov[i].iov_base);
1176 		rdma_req->data.wr.sg_list[i].length = rdma_req->req.iov[i].iov_len;
1177 		rdma_req->data.wr.sg_list[i].lkey = ((struct ibv_mr *)spdk_mem_map_translate(device->map,
1178 						     (uint64_t)buf, rdma_req->req.iov[i].iov_len))->lkey;
1179 
1180 		length -= rdma_req->req.iov[i].iov_len;
1181 		i++;
1182 	}
1183 
1184 	rdma_req->data_from_pool = true;
1185 
1186 	return 0;
1187 
1188 nomem:
1189 	while (i) {
1190 		i--;
1191 		spdk_mempool_put(rtransport->data_buf_pool, rdma_req->data.buffers[i]);
1192 		rdma_req->req.iov[i].iov_base = NULL;
1193 		rdma_req->req.iov[i].iov_len = 0;
1194 
1195 		rdma_req->data.wr.sg_list[i].addr = 0;
1196 		rdma_req->data.wr.sg_list[i].length = 0;
1197 		rdma_req->data.wr.sg_list[i].lkey = 0;
1198 	}
1199 	rdma_req->req.iovcnt = 0;
1200 	return -ENOMEM;
1201 }
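/*
 * Illustrative example: assuming io_unit_size is 8KiB, a 16KiB keyed-SGL
 * request takes two elements from data_buf_pool, producing two iovs and two
 * SGEs in data.wr. Each element is rounded up to a 4KiB boundary before use,
 * and the original (unaligned) pointers are kept in data.buffers[] so they
 * can be returned to the pool when the request completes.
 */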
1202 
1203 static int
1204 spdk_nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport,
1205 				 struct spdk_nvmf_rdma_device *device,
1206 				 struct spdk_nvmf_rdma_request *rdma_req)
1207 {
1208 	struct spdk_nvme_cmd			*cmd;
1209 	struct spdk_nvme_cpl			*rsp;
1210 	struct spdk_nvme_sgl_descriptor		*sgl;
1211 
1212 	cmd = &rdma_req->req.cmd->nvme_cmd;
1213 	rsp = &rdma_req->req.rsp->nvme_cpl;
1214 	sgl = &cmd->dptr.sgl1;
1215 
1216 	if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK &&
1217 	    (sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS ||
1218 	     sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY)) {
1219 		if (sgl->keyed.length > rtransport->transport.opts.max_io_size) {
1220 			SPDK_ERRLOG("SGL length 0x%x exceeds max io size 0x%x\n",
1221 				    sgl->keyed.length, rtransport->transport.opts.max_io_size);
1222 			rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
1223 			return -1;
1224 		}
1225 #ifdef SPDK_CONFIG_RDMA_SEND_WITH_INVAL
1226 		/**
1227 		 * These vendor IDs are assigned by the IEEE and an ID of 0 implies Soft-RoCE.
1228 		 * The Soft-RoCE RXE driver does not currently support send with invalidate.
1229 		 * There are changes making their way through the kernel now that will enable
1230 		 * this feature. When they are merged, we can conditionally enable this feature.
1231 		 *
1232 		 * todo: enable this for versions of the kernel rxe driver that support it.
1233 		 */
1234 		if (device->attr.vendor_id != 0) {
1235 			if (sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY) {
1236 				rdma_req->rsp.wr.opcode = IBV_WR_SEND_WITH_INV;
1237 				rdma_req->rsp.wr.imm_data = sgl->keyed.key;
1238 			}
1239 		}
1240 #endif
1241 
1242 		/* fill request length and populate iovs */
1243 		rdma_req->req.length = sgl->keyed.length;
1244 
1245 		if (spdk_nvmf_rdma_request_fill_iovs(rtransport, device, rdma_req) < 0) {
1246 			/* No available buffers. Queue this request up. */
1247 			SPDK_DEBUGLOG(SPDK_LOG_RDMA, "No available large data buffers. Queueing request %p\n", rdma_req);
1248 			return 0;
1249 		}
1250 
1251 		/* backward compatible */
1252 		rdma_req->req.data = rdma_req->req.iov[0].iov_base;
1253 
1254 		/* rdma wr specifics */
1255 		rdma_req->data.wr.num_sge = rdma_req->req.iovcnt;
1256 		rdma_req->data.wr.wr.rdma.rkey = sgl->keyed.key;
1257 		rdma_req->data.wr.wr.rdma.remote_addr = sgl->address;
1258 
1259 		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Request %p took %d buffer/s from central pool\n", rdma_req,
1260 			      rdma_req->req.iovcnt);
1261 
1262 		return 0;
1263 	} else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK &&
1264 		   sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET) {
1265 		uint64_t offset = sgl->address;
1266 		uint32_t max_len = rtransport->transport.opts.in_capsule_data_size;
1267 
1268 		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "In-capsule data: offset 0x%" PRIx64 ", length 0x%x\n",
1269 			      offset, sgl->unkeyed.length);
1270 
1271 		if (offset > max_len) {
1272 			SPDK_ERRLOG("In-capsule offset 0x%" PRIx64 " exceeds capsule length 0x%x\n",
1273 				    offset, max_len);
1274 			rsp->status.sc = SPDK_NVME_SC_INVALID_SGL_OFFSET;
1275 			return -1;
1276 		}
1277 		max_len -= (uint32_t)offset;
1278 
1279 		if (sgl->unkeyed.length > max_len) {
1280 			SPDK_ERRLOG("In-capsule data length 0x%x exceeds capsule length 0x%x\n",
1281 				    sgl->unkeyed.length, max_len);
1282 			rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
1283 			return -1;
1284 		}
1285 
1286 		rdma_req->req.data = rdma_req->recv->buf + offset;
1287 		rdma_req->data_from_pool = false;
1288 		rdma_req->req.length = sgl->unkeyed.length;
1289 
1290 		rdma_req->req.iov[0].iov_base = rdma_req->req.data;
1291 		rdma_req->req.iov[0].iov_len = rdma_req->req.length;
1292 		rdma_req->req.iovcnt = 1;
1293 
1294 		return 0;
1295 	}
1296 
1297 	SPDK_ERRLOG("Invalid NVMf I/O Command SGL:  Type 0x%x, Subtype 0x%x\n",
1298 		    sgl->generic.type, sgl->generic.subtype);
1299 	rsp->status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID;
1300 	return -1;
1301 }
1302 
1303 static bool
1304 spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
1305 			       struct spdk_nvmf_rdma_request *rdma_req)
1306 {
1307 	struct spdk_nvmf_rdma_qpair	*rqpair;
1308 	struct spdk_nvmf_rdma_device	*device;
1309 	struct spdk_nvme_cpl		*rsp = &rdma_req->req.rsp->nvme_cpl;
1310 	int				rc;
1311 	struct spdk_nvmf_rdma_recv	*rdma_recv;
1312 	enum spdk_nvmf_rdma_request_state prev_state;
1313 	bool				progress = false;
1314 	int				data_posted;
1315 	int				cur_rdma_rw_depth;
1316 
1317 	rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
1318 	device = rqpair->port->device;
1319 
1320 	assert(rdma_req->state != RDMA_REQUEST_STATE_FREE);
1321 
1322 	/* If the queue pair is in an error state, force the request to the completed state
1323 	 * to release resources. */
1324 	if (rqpair->ibv_attr.qp_state == IBV_QPS_ERR || rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
1325 		if (rdma_req->state == RDMA_REQUEST_STATE_NEED_BUFFER) {
1326 			TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
1327 		}
1328 		spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED);
1329 	}
1330 
1331 	/* The loop here is to allow for several back-to-back state changes. */
1332 	do {
1333 		prev_state = rdma_req->state;
1334 
1335 		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Request %p entering state %d\n", rdma_req, prev_state);
1336 
1337 		switch (rdma_req->state) {
1338 		case RDMA_REQUEST_STATE_FREE:
1339 			/* Some external code must kick a request into RDMA_REQUEST_STATE_NEW
1340 			 * to escape this state. */
1341 			break;
1342 		case RDMA_REQUEST_STATE_NEW:
1343 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_NEW, 0, 0, (uintptr_t)rdma_req, 0);
1344 			rdma_recv = rdma_req->recv;
1345 
1346 			/* The first element of the SGL is the NVMe command */
1347 			rdma_req->req.cmd = (union nvmf_h2c_msg *)rdma_recv->sgl[0].addr;
1348 			memset(rdma_req->req.rsp, 0, sizeof(*rdma_req->req.rsp));
1349 
1350 			TAILQ_REMOVE(&rqpair->incoming_queue, rdma_recv, link);
1351 
1352 			if (rqpair->ibv_attr.qp_state == IBV_QPS_ERR) {
1353 				spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED);
1354 				break;
1355 			}
1356 
1357 			/* The next state transition depends on the data transfer needs of this request. */
1358 			rdma_req->req.xfer = spdk_nvmf_rdma_request_get_xfer(rdma_req);
1359 
1360 			/* If no data to transfer, ready to execute. */
1361 			if (rdma_req->req.xfer == SPDK_NVME_DATA_NONE) {
1362 				spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_READY_TO_EXECUTE);
1363 				break;
1364 			}
1365 
1366 			spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_NEED_BUFFER);
1367 			TAILQ_INSERT_TAIL(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
1368 			break;
1369 		case RDMA_REQUEST_STATE_NEED_BUFFER:
1370 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_NEED_BUFFER, 0, 0, (uintptr_t)rdma_req, 0);
1371 
1372 			assert(rdma_req->req.xfer != SPDK_NVME_DATA_NONE);
1373 
1374 			if (rdma_req != TAILQ_FIRST(&rqpair->ch->pending_data_buf_queue)) {
1375 				/* This request needs to wait in line to obtain a buffer */
1376 				break;
1377 			}
1378 
1379 			/* Try to get a data buffer */
1380 			rc = spdk_nvmf_rdma_request_parse_sgl(rtransport, device, rdma_req);
1381 			if (rc < 0) {
1382 				TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
1383 				rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
1384 				spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_READY_TO_COMPLETE);
1385 				break;
1386 			}
1387 
1388 			if (!rdma_req->req.data) {
1389 				/* No buffers available. */
1390 				break;
1391 			}
1392 
1393 			TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
1394 
1395 			/* If data is transferring from host to controller and the data didn't
1396 			 * arrive using in capsule data, we need to do a transfer from the host.
1397 			 */
1398 			if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER && rdma_req->data_from_pool) {
1399 				spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING);
1400 				break;
1401 			}
1402 
1403 			spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_READY_TO_EXECUTE);
1404 			break;
1405 		case RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING:
1406 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING, 0, 0,
1407 					  (uintptr_t)rdma_req, 0);
1408 
1409 			if (rdma_req != TAILQ_FIRST(&rqpair->state_queue[RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING])) {
1410 				/* This request needs to wait in line to perform RDMA */
1411 				break;
1412 			}
1413 			cur_rdma_rw_depth = spdk_nvmf_rdma_cur_rw_depth(rqpair);
1414 
1415 			if (cur_rdma_rw_depth >= rqpair->max_rw_depth) {
1416 				/* R/W queue is full, need to wait */
1417 				break;
1418 			}
1419 
1420 			if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
1421 				rc = request_transfer_in(&rdma_req->req);
1422 				if (!rc) {
1423 					spdk_nvmf_rdma_request_set_state(rdma_req,
1424 									 RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
1425 				} else {
1426 					rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
1427 					spdk_nvmf_rdma_request_set_state(rdma_req,
1428 									 RDMA_REQUEST_STATE_READY_TO_COMPLETE);
1429 				}
1430 			} else if (rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1431 				/* The data transfer will be kicked off from
1432 				 * RDMA_REQUEST_STATE_READY_TO_COMPLETE state.
1433 				 */
1434 				spdk_nvmf_rdma_request_set_state(rdma_req,
1435 								 RDMA_REQUEST_STATE_READY_TO_COMPLETE);
1436 			} else {
1437 				SPDK_ERRLOG("Cannot perform data transfer, unknown direction: %u\n",
1438 					    rdma_req->req.xfer);
1439 				assert(0);
1440 			}
1441 			break;
1442 		case RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER:
1443 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER, 0, 0,
1444 					  (uintptr_t)rdma_req, 0);
1445 			/* Some external code must kick a request into RDMA_REQUEST_STATE_READY_TO_EXECUTE
1446 			 * to escape this state. */
1447 			break;
1448 		case RDMA_REQUEST_STATE_READY_TO_EXECUTE:
1449 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_READY_TO_EXECUTE, 0, 0, (uintptr_t)rdma_req, 0);
1450 			spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_EXECUTING);
1451 			spdk_nvmf_request_exec(&rdma_req->req);
1452 			break;
1453 		case RDMA_REQUEST_STATE_EXECUTING:
1454 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_EXECUTING, 0, 0, (uintptr_t)rdma_req, 0);
1455 			/* Some external code must kick a request into RDMA_REQUEST_STATE_EXECUTED
1456 			 * to escape this state. */
1457 			break;
1458 		case RDMA_REQUEST_STATE_EXECUTED:
1459 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_EXECUTED, 0, 0, (uintptr_t)rdma_req, 0);
1460 			if (rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1461 				spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING);
1462 			} else {
1463 				spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_READY_TO_COMPLETE);
1464 			}
1465 			break;
1466 		case RDMA_REQUEST_STATE_READY_TO_COMPLETE:
1467 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_READY_TO_COMPLETE, 0, 0, (uintptr_t)rdma_req, 0);
1468 			rc = request_transfer_out(&rdma_req->req, &data_posted);
1469 			assert(rc == 0); /* No good way to handle this currently */
1470 			spdk_nvmf_rdma_request_set_state(rdma_req,
1471 							 data_posted ?
1472 							 RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST :
1473 							 RDMA_REQUEST_STATE_COMPLETING);
1474 			break;
1475 		case RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST:
1476 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST, 0, 0,
1477 					  (uintptr_t)rdma_req,
1478 					  0);
1479 			/* Some external code must kick a request into RDMA_REQUEST_STATE_COMPLETED
1480 			 * to escape this state. */
1481 			break;
1482 		case RDMA_REQUEST_STATE_COMPLETING:
1483 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_COMPLETING, 0, 0, (uintptr_t)rdma_req, 0);
1484 			/* Some external code must kick a request into RDMA_REQUEST_STATE_COMPLETED
1485 			 * to escape this state. */
1486 			break;
1487 		case RDMA_REQUEST_STATE_COMPLETED:
1488 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_COMPLETED, 0, 0, (uintptr_t)rdma_req, 0);
1489 
1490 			if (rdma_req->data_from_pool) {
1491 				/* Put the buffer/s back in the pool */
1492 				for (uint32_t i = 0; i < rdma_req->req.iovcnt; i++) {
1493 					spdk_mempool_put(rtransport->data_buf_pool, rdma_req->data.buffers[i]);
1494 					rdma_req->req.iov[i].iov_base = NULL;
1495 					rdma_req->data.buffers[i] = NULL;
1496 				}
1497 				rdma_req->data_from_pool = false;
1498 			}
1499 			rdma_req->req.length = 0;
1500 			rdma_req->req.iovcnt = 0;
1501 			rdma_req->req.data = NULL;
1502 			spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_FREE);
1503 			break;
1504 		case RDMA_REQUEST_NUM_STATES:
1505 		default:
1506 			assert(0);
1507 			break;
1508 		}
1509 
1510 		if (rdma_req->state != prev_state) {
1511 			progress = true;
1512 		}
1513 	} while (rdma_req->state != prev_state);
1514 
1515 	return progress;
1516 }
1517 
1518 /* Public API callbacks begin here */
1519 
1520 static int spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport);
1521 
1522 static struct spdk_nvmf_transport *
1523 spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
1524 {
1525 	int rc;
1526 	struct spdk_nvmf_rdma_transport *rtransport;
1527 	struct spdk_nvmf_rdma_device	*device, *tmp;
1528 	struct ibv_context		**contexts;
1529 	uint32_t			i;
1530 	int				flag;
1531 	uint32_t			sge_count;
1532 
1533 	rtransport = calloc(1, sizeof(*rtransport));
1534 	if (!rtransport) {
1535 		return NULL;
1536 	}
1537 
1538 	if (pthread_mutex_init(&rtransport->lock, NULL)) {
1539 		SPDK_ERRLOG("pthread_mutex_init() failed\n");
1540 		free(rtransport);
1541 		return NULL;
1542 	}
1543 
1544 	spdk_io_device_register(rtransport, spdk_nvmf_rdma_mgmt_channel_create,
1545 				spdk_nvmf_rdma_mgmt_channel_destroy,
1546 				sizeof(struct spdk_nvmf_rdma_mgmt_channel));
1547 
1548 	TAILQ_INIT(&rtransport->devices);
1549 	TAILQ_INIT(&rtransport->ports);
1550 
1551 	rtransport->transport.ops = &spdk_nvmf_transport_rdma;
1552 
1553 	SPDK_INFOLOG(SPDK_LOG_RDMA, "*** RDMA Transport Init ***\n"
1554 		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
1555 		     "  max_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
1556 		     "  in_capsule_data_size=%d, max_aq_depth=%d\n",
1557 		     opts->max_queue_depth,
1558 		     opts->max_io_size,
1559 		     opts->max_qpairs_per_ctrlr,
1560 		     opts->io_unit_size,
1561 		     opts->in_capsule_data_size,
1562 		     opts->max_aq_depth);
1563 
1564 	/* I/O unit size cannot be larger than max I/O size */
1565 	if (opts->io_unit_size > opts->max_io_size) {
1566 		opts->io_unit_size = opts->max_io_size;
1567 	}
1568 
1569 	sge_count = opts->max_io_size / opts->io_unit_size;
1570 	if (sge_count > SPDK_NVMF_MAX_SGL_ENTRIES) {
1571 		SPDK_ERRLOG("Unsupported IO unit size %u: max IO size %u requires more than %d SGL entries\n", opts->io_unit_size, opts->max_io_size, SPDK_NVMF_MAX_SGL_ENTRIES);
1572 		spdk_nvmf_rdma_destroy(&rtransport->transport);
1573 		return NULL;
1574 	}
1575 
1576 	rtransport->event_channel = rdma_create_event_channel();
1577 	if (rtransport->event_channel == NULL) {
1578 		SPDK_ERRLOG("rdma_create_event_channel() failed, %s\n", spdk_strerror(errno));
1579 		spdk_nvmf_rdma_destroy(&rtransport->transport);
1580 		return NULL;
1581 	}
1582 
1583 	flag = fcntl(rtransport->event_channel->fd, F_GETFL);
1584 	if (fcntl(rtransport->event_channel->fd, F_SETFL, flag | O_NONBLOCK) < 0) {
1585 		SPDK_ERRLOG("fcntl can't set nonblocking mode for the CM event channel fd: %d (%s)\n",
1586 			    rtransport->event_channel->fd, spdk_strerror(errno));
1587 		spdk_nvmf_rdma_destroy(&rtransport->transport);
1588 		return NULL;
1589 	}
1590 
1591 	rtransport->data_buf_pool = spdk_mempool_create("spdk_nvmf_rdma",
1592 				    opts->max_queue_depth * 4, /* The 4 is arbitrarily chosen. Needs to be configurable. */
1593 				    opts->max_io_size + NVMF_DATA_BUFFER_ALIGNMENT,
1594 				    SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
1595 				    SPDK_ENV_SOCKET_ID_ANY);
1596 	if (!rtransport->data_buf_pool) {
1597 		SPDK_ERRLOG("Unable to allocate data buffer pool for the RDMA transport\n");
1598 		spdk_nvmf_rdma_destroy(&rtransport->transport);
1599 		return NULL;
1600 	}
1601 
1602 	contexts = rdma_get_devices(NULL);
1603 	if (contexts == NULL) {
1604 		SPDK_ERRLOG("rdma_get_devices() failed: %s (%d)\n", spdk_strerror(errno), errno);
1605 		spdk_nvmf_rdma_destroy(&rtransport->transport);
1606 		return NULL;
1607 	}
1608 
1609 	i = 0;
1610 	rc = 0;
1611 	while (contexts[i] != NULL) {
1612 		device = calloc(1, sizeof(*device));
1613 		if (!device) {
1614 			SPDK_ERRLOG("Unable to allocate memory for RDMA devices.\n");
1615 			rc = -ENOMEM;
1616 			break;
1617 		}
1618 		device->context = contexts[i];
1619 		rc = ibv_query_device(device->context, &device->attr);
1620 		if (rc < 0) {
1621 			SPDK_ERRLOG("Failed to query RDMA device attributes.\n");
1622 			free(device);
1623 			break;
1624 
1625 		}
1626 		/* set up device context async ev fd as NON_BLOCKING */
1627 		flag = fcntl(device->context->async_fd, F_GETFL);
1628 		rc = fcntl(device->context->async_fd, F_SETFL, flag | O_NONBLOCK);
1629 		if (rc < 0) {
1630 			SPDK_ERRLOG("Failed to set context async fd to NONBLOCK.\n");
1631 			free(device);
1632 			break;
1633 		}
1634 
1635 		device->pd = ibv_alloc_pd(device->context);
1636 		if (!device->pd) {
1637 			SPDK_ERRLOG("Unable to allocate protection domain.\n");
1638 			free(device);
1639 			rc = -1;
1640 			break;
1641 		}
1642 
1643 		device->map = spdk_mem_map_alloc(0, spdk_nvmf_rdma_mem_notify, device);
1644 		if (!device->map) {
1645 			SPDK_ERRLOG("Unable to allocate memory map for device\n");
1646 			ibv_dealloc_pd(device->pd);
1647 			free(device);
1648 			rc = -1;
1649 			break;
1650 		}
1651 
1652 		TAILQ_INSERT_TAIL(&rtransport->devices, device, link);
1653 		i++;
1654 	}
1655 	rdma_free_devices(contexts);
1656 
1657 	if (rc < 0) {
1658 		spdk_nvmf_rdma_destroy(&rtransport->transport);
1659 		return NULL;
1660 	}
1661 
1662 	/* Set up poll descriptor array to monitor events from RDMA and IB
1663 	 * in a single poll syscall
1664 	 */
1665 	rtransport->npoll_fds = i + 1;
1666 	i = 0;
1667 	rtransport->poll_fds = calloc(rtransport->npoll_fds, sizeof(struct pollfd));
1668 	if (rtransport->poll_fds == NULL) {
1669 		SPDK_ERRLOG("poll_fds allocation failed\n");
1670 		spdk_nvmf_rdma_destroy(&rtransport->transport);
1671 		return NULL;
1672 	}
1673 
1674 	rtransport->poll_fds[i].fd = rtransport->event_channel->fd;
1675 	rtransport->poll_fds[i++].events = POLLIN;
1676 
1677 	TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) {
1678 		rtransport->poll_fds[i].fd = device->context->async_fd;
1679 		rtransport->poll_fds[i++].events = POLLIN;
1680 	}
1681 
1682 	return &rtransport->transport;
1683 }
1684 
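/* Transport destroy callback. Releases everything created in
 * spdk_nvmf_rdma_create(): listening ports, the pollfd array, the RDMA CM
 * event channel, each device's memory map and protection domain, and the
 * data buffer pool (logging an error if any buffers are still outstanding). */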
1685 static int
1686 spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport)
1687 {
1688 	struct spdk_nvmf_rdma_transport	*rtransport;
1689 	struct spdk_nvmf_rdma_port	*port, *port_tmp;
1690 	struct spdk_nvmf_rdma_device	*device, *device_tmp;
1691 
1692 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
1693 
1694 	TAILQ_FOREACH_SAFE(port, &rtransport->ports, link, port_tmp) {
1695 		TAILQ_REMOVE(&rtransport->ports, port, link);
1696 		rdma_destroy_id(port->id);
1697 		free(port);
1698 	}
1699 
1700 	if (rtransport->poll_fds != NULL) {
1701 		free(rtransport->poll_fds);
1702 	}
1703 
1704 	if (rtransport->event_channel != NULL) {
1705 		rdma_destroy_event_channel(rtransport->event_channel);
1706 	}
1707 
1708 	TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, device_tmp) {
1709 		TAILQ_REMOVE(&rtransport->devices, device, link);
1710 		if (device->map) {
1711 			spdk_mem_map_free(&device->map);
1712 		}
1713 		if (device->pd) {
1714 			ibv_dealloc_pd(device->pd);
1715 		}
1716 		free(device);
1717 	}
1718 
1719 	if (rtransport->data_buf_pool != NULL) {
1720 		if (spdk_mempool_count(rtransport->data_buf_pool) !=
1721 		    (transport->opts.max_queue_depth * 4)) {
1722 			SPDK_ERRLOG("transport buffer pool count is %zu but should be %u\n",
1723 				    spdk_mempool_count(rtransport->data_buf_pool),
1724 				    transport->opts.max_queue_depth * 4);
1725 		}
1726 	}
1727 
1728 	spdk_mempool_free(rtransport->data_buf_pool);
1729 	spdk_io_device_unregister(rtransport, NULL);
1730 	pthread_mutex_destroy(&rtransport->lock);
1731 	free(rtransport);
1732 
1733 	return 0;
1734 }
1735 
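/* Begin listening for connections on the address described by trid. If the
 * transport is already listening on that address, the existing port's
 * reference count is incremented instead. Otherwise a new rdma_cm_id is
 * created, bound to the resolved address, placed into the listening state,
 * associated with the matching RDMA device, and added to the port list. */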
1736 static int
1737 spdk_nvmf_rdma_listen(struct spdk_nvmf_transport *transport,
1738 		      const struct spdk_nvme_transport_id *trid)
1739 {
1740 	struct spdk_nvmf_rdma_transport	*rtransport;
1741 	struct spdk_nvmf_rdma_device	*device;
1742 	struct spdk_nvmf_rdma_port	*port_tmp, *port;
1743 	struct addrinfo			*res;
1744 	struct addrinfo			hints;
1745 	int				family;
1746 	int				rc;
1747 
1748 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
1749 
1750 	port = calloc(1, sizeof(*port));
1751 	if (!port) {
1752 		return -ENOMEM;
1753 	}
1754 
1755 	/* Selectively copy the trid. Things like NQN don't matter here - that
1756 	 * mapping is enforced elsewhere.
1757 	 */
1758 	port->trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
1759 	port->trid.adrfam = trid->adrfam;
1760 	snprintf(port->trid.traddr, sizeof(port->trid.traddr), "%s", trid->traddr);
1761 	snprintf(port->trid.trsvcid, sizeof(port->trid.trsvcid), "%s", trid->trsvcid);
1762 
1763 	pthread_mutex_lock(&rtransport->lock);
1764 	assert(rtransport->event_channel != NULL);
1765 	TAILQ_FOREACH(port_tmp, &rtransport->ports, link) {
1766 		if (spdk_nvme_transport_id_compare(&port_tmp->trid, &port->trid) == 0) {
1767 			port_tmp->ref++;
1768 			free(port);
1769 			/* Already listening at this address */
1770 			pthread_mutex_unlock(&rtransport->lock);
1771 			return 0;
1772 		}
1773 	}
1774 
1775 	rc = rdma_create_id(rtransport->event_channel, &port->id, port, RDMA_PS_TCP);
1776 	if (rc < 0) {
1777 		SPDK_ERRLOG("rdma_create_id() failed\n");
1778 		free(port);
1779 		pthread_mutex_unlock(&rtransport->lock);
1780 		return rc;
1781 	}
1782 
1783 	switch (port->trid.adrfam) {
1784 	case SPDK_NVMF_ADRFAM_IPV4:
1785 		family = AF_INET;
1786 		break;
1787 	case SPDK_NVMF_ADRFAM_IPV6:
1788 		family = AF_INET6;
1789 		break;
1790 	default:
1791 		SPDK_ERRLOG("Unhandled ADRFAM %d\n", port->trid.adrfam);
1792 		free(port);
1793 		pthread_mutex_unlock(&rtransport->lock);
1794 		return -EINVAL;
1795 	}
1796 
1797 	memset(&hints, 0, sizeof(hints));
1798 	hints.ai_family = family;
1799 	hints.ai_socktype = SOCK_STREAM;
1800 	hints.ai_protocol = 0;
1801 
1802 	rc = getaddrinfo(port->trid.traddr, port->trid.trsvcid, &hints, &res);
1803 	if (rc) {
1804 		SPDK_ERRLOG("getaddrinfo failed: %s (%d)\n", gai_strerror(rc), rc);
1805 		free(port);
1806 		pthread_mutex_unlock(&rtransport->lock);
1807 		return -EINVAL;
1808 	}
1809 
1810 	rc = rdma_bind_addr(port->id, res->ai_addr);
1811 	freeaddrinfo(res);
1812 
1813 	if (rc < 0) {
1814 		SPDK_ERRLOG("rdma_bind_addr() failed\n");
1815 		rdma_destroy_id(port->id);
1816 		free(port);
1817 		pthread_mutex_unlock(&rtransport->lock);
1818 		return rc;
1819 	}
1820 
1821 	if (!port->id->verbs) {
1822 		SPDK_ERRLOG("ibv_context is null\n");
1823 		rdma_destroy_id(port->id);
1824 		free(port);
1825 		pthread_mutex_unlock(&rtransport->lock);
1826 		return -1;
1827 	}
1828 
1829 	rc = rdma_listen(port->id, 10); /* 10 = backlog */
1830 	if (rc < 0) {
1831 		SPDK_ERRLOG("rdma_listen() failed\n");
1832 		rdma_destroy_id(port->id);
1833 		free(port);
1834 		pthread_mutex_unlock(&rtransport->lock);
1835 		return rc;
1836 	}
1837 
1838 	TAILQ_FOREACH(device, &rtransport->devices, link) {
1839 		if (device->context == port->id->verbs) {
1840 			port->device = device;
1841 			break;
1842 		}
1843 	}
1844 	if (!port->device) {
1845 		SPDK_ERRLOG("Bound to verbs context %p, but unable to find a corresponding RDMA device.\n",
1846 			    port->id->verbs);
1847 		rdma_destroy_id(port->id);
1848 		free(port);
1849 		pthread_mutex_unlock(&rtransport->lock);
1850 		return -EINVAL;
1851 	}
1852 
1853 	SPDK_INFOLOG(SPDK_LOG_RDMA, "*** NVMf Target Listening on %s port %d ***\n",
1854 		     port->trid.traddr, ntohs(rdma_get_src_port(port->id)));
1855 
1856 	port->ref = 1;
1857 
1858 	TAILQ_INSERT_TAIL(&rtransport->ports, port, link);
1859 	pthread_mutex_unlock(&rtransport->lock);
1860 
1861 	return 0;
1862 }
1863 
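/* Stop listening on the address described by _trid. The matching port's
 * reference count is decremented; the rdma_cm_id is destroyed and the port
 * freed only once the count reaches zero. */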
1864 static int
1865 spdk_nvmf_rdma_stop_listen(struct spdk_nvmf_transport *transport,
1866 			   const struct spdk_nvme_transport_id *_trid)
1867 {
1868 	struct spdk_nvmf_rdma_transport *rtransport;
1869 	struct spdk_nvmf_rdma_port *port, *tmp;
1870 	struct spdk_nvme_transport_id trid = {};
1871 
1872 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
1873 
1874 	/* Selectively copy the trid. Things like NQN don't matter here - that
1875 	 * mapping is enforced elsewhere.
1876 	 */
1877 	trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
1878 	trid.adrfam = _trid->adrfam;
1879 	snprintf(trid.traddr, sizeof(trid.traddr), "%s", _trid->traddr);
1880 	snprintf(trid.trsvcid, sizeof(trid.trsvcid), "%s", _trid->trsvcid);
1881 
1882 	pthread_mutex_lock(&rtransport->lock);
1883 	TAILQ_FOREACH_SAFE(port, &rtransport->ports, link, tmp) {
1884 		if (spdk_nvme_transport_id_compare(&port->trid, &trid) == 0) {
1885 			assert(port->ref > 0);
1886 			port->ref--;
1887 			if (port->ref == 0) {
1888 				TAILQ_REMOVE(&rtransport->ports, port, link);
1889 				rdma_destroy_id(port->id);
1890 				free(port);
1891 			}
1892 			break;
1893 		}
1894 	}
1895 
1896 	pthread_mutex_unlock(&rtransport->lock);
1897 	return 0;
1898 }
1899 
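/* A qpair is idle when it has no outstanding commands and no RDMA READ/WRITE
 * operations in flight. */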
1900 static bool
1901 spdk_nvmf_rdma_qpair_is_idle(struct spdk_nvmf_qpair *qpair)
1902 {
1903 	int cur_queue_depth, cur_rdma_rw_depth;
1904 	struct spdk_nvmf_rdma_qpair *rqpair;
1905 
1906 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
1907 	cur_queue_depth = spdk_nvmf_rdma_cur_queue_depth(rqpair);
1908 	cur_rdma_rw_depth = spdk_nvmf_rdma_cur_rw_depth(rqpair);
1909 
1910 	if (cur_queue_depth == 0 && cur_rdma_rw_depth == 0) {
1911 		return true;
1912 	}
1913 	return false;
1914 }
1915 
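/* Drive forward requests on this qpair that were previously blocked on a
 * resource. Requests waiting on RDMA queue depth are retried first, then
 * requests waiting on data buffers, and finally newly received commands are
 * paired with free request objects. New commands are not started while the
 * qp is in the ERROR state; they stay queued until recovery completes. */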
1916 static void
1917 spdk_nvmf_rdma_qpair_process_pending(struct spdk_nvmf_rdma_transport *rtransport,
1918 				     struct spdk_nvmf_rdma_qpair *rqpair)
1919 {
1920 	struct spdk_nvmf_rdma_recv	*rdma_recv, *recv_tmp;
1921 	struct spdk_nvmf_rdma_request	*rdma_req, *req_tmp;
1922 
1923 	/* We process I/O in the data transfer pending queue at the highest priority. */
1924 	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING],
1925 			   state_link, req_tmp) {
1926 		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false) {
1927 			break;
1928 		}
1929 	}
1930 
1931 	/* The second highest priority is I/O waiting on memory buffers. */
1932 	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->ch->pending_data_buf_queue, link,
1933 			   req_tmp) {
1934 		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false) {
1935 			break;
1936 		}
1937 	}
1938 
1939 	if (rqpair->qpair_disconnected) {
1940 		spdk_nvmf_rdma_qpair_destroy(rqpair);
1941 		return;
1942 	}
1943 
1944 	/* Do not process newly received commands if the qp is in the ERROR state;
1945 	 * wait until recovery is complete.
1946 	 */
1947 	if (rqpair->ibv_attr.qp_state == IBV_QPS_ERR) {
1948 		return;
1949 	}
1950 
1951 	/* The lowest priority is processing newly received commands */
1952 	TAILQ_FOREACH_SAFE(rdma_recv, &rqpair->incoming_queue, link, recv_tmp) {
1953 		if (TAILQ_EMPTY(&rqpair->state_queue[RDMA_REQUEST_STATE_FREE])) {
1954 			break;
1955 		}
1956 
1957 		rdma_req = TAILQ_FIRST(&rqpair->state_queue[RDMA_REQUEST_STATE_FREE]);
1958 		rdma_req->recv = rdma_recv;
1959 		spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_NEW);
1960 		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false) {
1961 			break;
1962 		}
1963 	}
1964 }
1965 
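/* Force every request currently in the given state straight to
 * RDMA_REQUEST_STATE_COMPLETED and run the state machine once so the
 * request's resources are released. */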
1966 static void
1967 spdk_nvmf_rdma_drain_state_queue(struct spdk_nvmf_rdma_qpair *rqpair,
1968 				 enum spdk_nvmf_rdma_request_state state)
1969 {
1970 	struct spdk_nvmf_rdma_request *rdma_req, *req_tmp;
1971 	struct spdk_nvmf_rdma_transport *rtransport;
1972 
1973 	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[state], state_link, req_tmp) {
1974 		rtransport = SPDK_CONTAINEROF(rdma_req->req.qpair->transport,
1975 					      struct spdk_nvmf_rdma_transport, transport);
1976 		spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED);
1977 		spdk_nvmf_rdma_request_process(rtransport, rdma_req);
1978 	}
1979 }
1980 
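/* Attempt to recover a qpair whose ibv qp has entered the ERR (or RESET)
 * state. Recovery is deferred until the qpair is idle; the qp is then stepped
 * through RESET -> INIT -> RTR -> RTS and any pending work is reprocessed.
 * If the qp is in any other state, or a state transition fails, the qpair is
 * disconnected instead. */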
1981 static void
1982 spdk_nvmf_rdma_qpair_recover(struct spdk_nvmf_rdma_qpair *rqpair)
1983 {
1984 	enum ibv_qp_state state, next_state;
1985 	int recovered;
1986 	struct spdk_nvmf_rdma_transport *rtransport;
1987 
1988 	if (!spdk_nvmf_rdma_qpair_is_idle(&rqpair->qpair)) {
1989 		/* There are still requests outstanding down at the media level.
1990 		 * Wait until they complete before attempting recovery.
1991 		 */
1992 		assert(!TAILQ_EMPTY(&rqpair->qpair.outstanding));
1993 		return;
1994 	}
1995 
1996 	state = rqpair->ibv_attr.qp_state;
1997 	next_state = state;
1998 
1999 	SPDK_NOTICELOG("RDMA qpair %u is in state: %s\n",
2000 		       rqpair->qpair.qid,
2001 		       str_ibv_qp_state[state]);
2002 
2003 	if (!(state == IBV_QPS_ERR || state == IBV_QPS_RESET)) {
2004 		SPDK_ERRLOG("Can't recover RDMA qpair %u from state %s\n",
2005 			    rqpair->qpair.qid,
2006 			    str_ibv_qp_state[state]);
2007 		spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
2008 		return;
2009 	}
2010 
2011 	recovered = 0;
2012 	while (!recovered) {
2013 		switch (state) {
2014 		case IBV_QPS_ERR:
2015 			next_state = IBV_QPS_RESET;
2016 			break;
2017 		case IBV_QPS_RESET:
2018 			next_state = IBV_QPS_INIT;
2019 			break;
2020 		case IBV_QPS_INIT:
2021 			next_state = IBV_QPS_RTR;
2022 			break;
2023 		case IBV_QPS_RTR:
2024 			next_state = IBV_QPS_RTS;
2025 			break;
2026 		case IBV_QPS_RTS:
2027 			recovered = 1;
2028 			break;
2029 		default:
2030 			SPDK_ERRLOG("RDMA qpair %u unexpected state for recovery: %u\n",
2031 				    rqpair->qpair.qid, state);
2032 			goto error;
2033 		}
2034 		/* Do not transition into same state */
2035 		if (next_state == state) {
2036 			break;
2037 		}
2038 
2039 		if (spdk_nvmf_rdma_set_ibv_state(rqpair, next_state)) {
2040 			goto error;
2041 		}
2042 
2043 		state = next_state;
2044 	}
2045 
2046 	rtransport = SPDK_CONTAINEROF(rqpair->qpair.transport,
2047 				      struct spdk_nvmf_rdma_transport,
2048 				      transport);
2049 
2050 	spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair);
2051 
2052 	return;
2053 error:
2054 	SPDK_NOTICELOG("RDMA qpair %u: recovery failed, disconnecting...\n",
2055 		       rqpair->qpair.qid);
2056 	spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
2057 }
2058 
2059 /* Clean up only the states that can be aborted at any time */
2060 static void
2061 _spdk_nvmf_rdma_qp_cleanup_safe_states(struct spdk_nvmf_rdma_qpair *rqpair)
2062 {
2063 	struct spdk_nvmf_rdma_request	*rdma_req, *req_tmp;
2064 
2065 	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_NEW);
2066 	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->state_queue[RDMA_REQUEST_STATE_NEED_BUFFER], link, req_tmp) {
2067 		TAILQ_REMOVE(&rqpair->ch->pending_data_buf_queue, rdma_req, link);
2068 	}
2069 	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_NEED_BUFFER);
2070 	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING);
2071 	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_READY_TO_EXECUTE);
2072 	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_EXECUTED);
2073 	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_READY_TO_COMPLETE);
2074 	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_COMPLETED);
2075 }
2076 
2077 /* This cleans up all memory. It is only safe to use if the rest of the software stack
2078  * has been shut down */
2079 static void
2080 _spdk_nvmf_rdma_qp_cleanup_all_states(struct spdk_nvmf_rdma_qpair *rqpair)
2081 {
2082 	_spdk_nvmf_rdma_qp_cleanup_safe_states(rqpair);
2083 
2084 	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_EXECUTING);
2085 	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
2086 	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
2087 	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_COMPLETING);
2088 }
2089 
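/* Handler sent to the qpair's thread for IBV_EVENT_QP_FATAL (and for
 * IBV_EVENT_SQ_DRAINED while the qp is in the ERROR state). Aborts any
 * outstanding AER on admin queues, cleans up the request states that are
 * safe to abort, and then attempts recovery. */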
2090 static void
2091 _spdk_nvmf_rdma_qp_error(void *arg)
2092 {
2093 	struct spdk_nvmf_rdma_qpair	*rqpair = arg;
2094 	enum ibv_qp_state		state;
2095 
2096 	state = rqpair->ibv_attr.qp_state;
2097 	if (state != IBV_QPS_ERR) {
2098 		/* Error was already recovered */
2099 		return;
2100 	}
2101 
2102 	if (spdk_nvmf_qpair_is_admin_queue(&rqpair->qpair)) {
2103 		spdk_nvmf_ctrlr_abort_aer(rqpair->qpair.ctrlr);
2104 	}
2105 
2106 	_spdk_nvmf_rdma_qp_cleanup_safe_states(rqpair);
2107 
2108 	/* Attempt recovery. This will exit without recovering if I/O requests
2109 	 * are still outstanding */
2110 	spdk_nvmf_rdma_qpair_recover(rqpair);
2111 }
2112 
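/* Handler sent to the qpair's thread for IBV_EVENT_QP_LAST_WQE_REACHED. Once
 * the last work request has drained, the in-flight data transfer and
 * completion states can also be safely aborted before attempting recovery. */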
2113 static void
2114 _spdk_nvmf_rdma_qp_last_wqe(void *arg)
2115 {
2116 	struct spdk_nvmf_rdma_qpair	*rqpair = arg;
2117 	enum ibv_qp_state		state;
2118 
2119 	state = rqpair->ibv_attr.qp_state;
2120 	if (state != IBV_QPS_ERR) {
2121 		/* Error was already recovered */
2122 		return;
2123 	}
2124 
2125 	/* Clear out the states that are safe to clear any time, plus the
2126 	 * RDMA data transfer states. */
2127 	_spdk_nvmf_rdma_qp_cleanup_safe_states(rqpair);
2128 
2129 	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
2130 	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
2131 	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_COMPLETING);
2132 
2133 	spdk_nvmf_rdma_qpair_recover(rqpair);
2134 }
2135 
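/* Read a single asynchronous event from the device and dispatch it. Fatal qp
 * errors and last-WQE notifications are forwarded to the owning qpair's
 * thread; most other events only refresh the cached ibv qp state. */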
2136 static void
2137 spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
2138 {
2139 	int				rc;
2140 	struct spdk_nvmf_rdma_qpair	*rqpair;
2141 	struct ibv_async_event		event;
2142 	enum ibv_qp_state		state;
2143 
2144 	rc = ibv_get_async_event(device->context, &event);
2145 
2146 	if (rc) {
2147 		SPDK_ERRLOG("Failed to get async_event (%d): %s\n",
2148 			    errno, spdk_strerror(errno));
2149 		return;
2150 	}
2151 
2152 	SPDK_NOTICELOG("Async event: %s\n",
2153 		       ibv_event_type_str(event.event_type));
2154 
2155 	rqpair = event.element.qp->qp_context;
2156 
2157 	switch (event.event_type) {
2158 	case IBV_EVENT_QP_FATAL:
2159 		spdk_nvmf_rdma_update_ibv_state(rqpair);
2160 		spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_qp_error, rqpair);
2161 		break;
2162 	case IBV_EVENT_QP_LAST_WQE_REACHED:
2163 		spdk_nvmf_rdma_update_ibv_state(rqpair);
2164 		spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_qp_last_wqe, rqpair);
2165 		break;
2166 	case IBV_EVENT_SQ_DRAINED:
2167 		/* This event occurs frequently in both error and non-error states.
2168 		 * Check if the qpair is in an error state before sending a message.
2169 		 * Note that we're not on the correct thread to access the qpair, but
2170 		 * the operations performed by the calls below all happen to be
2171 		 * thread safe. */
2172 		state = spdk_nvmf_rdma_update_ibv_state(rqpair);
2173 		if (state == IBV_QPS_ERR) {
2174 			spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_qp_error, rqpair);
2175 		}
2176 		break;
2177 	case IBV_EVENT_QP_REQ_ERR:
2178 	case IBV_EVENT_QP_ACCESS_ERR:
2179 	case IBV_EVENT_COMM_EST:
2180 	case IBV_EVENT_PATH_MIG:
2181 	case IBV_EVENT_PATH_MIG_ERR:
2182 		spdk_nvmf_rdma_update_ibv_state(rqpair);
2183 		break;
2184 	case IBV_EVENT_CQ_ERR:
2185 	case IBV_EVENT_DEVICE_FATAL:
2186 	case IBV_EVENT_PORT_ACTIVE:
2187 	case IBV_EVENT_PORT_ERR:
2188 	case IBV_EVENT_LID_CHANGE:
2189 	case IBV_EVENT_PKEY_CHANGE:
2190 	case IBV_EVENT_SM_CHANGE:
2191 	case IBV_EVENT_SRQ_ERR:
2192 	case IBV_EVENT_SRQ_LIMIT_REACHED:
2193 	case IBV_EVENT_CLIENT_REREGISTER:
2194 	case IBV_EVENT_GID_CHANGE:
2195 	default:
2196 		break;
2197 	}
2198 	ibv_ack_async_event(&event);
2199 }
2200 
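/* Transport accept callback. Polls the RDMA CM event channel for connection
 * management events and each device's async fd for IB events, using the
 * pollfd array built at transport creation time. */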
2201 static void
2202 spdk_nvmf_rdma_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn)
2203 {
2204 	int	nfds, i = 0;
2205 	struct spdk_nvmf_rdma_transport *rtransport;
2206 	struct spdk_nvmf_rdma_device *device, *tmp;
2207 
2208 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
2209 	nfds = poll(rtransport->poll_fds, rtransport->npoll_fds, 0);
2210 
2211 	if (nfds <= 0) {
2212 		return;
2213 	}
2214 
2215 	/* The first poll descriptor is RDMA CM event */
2216 	if (rtransport->poll_fds[i++].revents & POLLIN) {
2217 		spdk_nvmf_process_cm_event(transport, cb_fn);
2218 		nfds--;
2219 	}
2220 
2221 	if (nfds == 0) {
2222 		return;
2223 	}
2224 
2225 	/* Second and subsequent poll descriptors are IB async events */
2226 	TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) {
2227 		if (rtransport->poll_fds[i++].revents & POLLIN) {
2228 			spdk_nvmf_process_ib_event(device);
2229 			nfds--;
2230 		}
2231 	}
2232 	/* check that all flagged fds have been served */
2233 	assert(nfds == 0);
2234 }
2235 
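/* Fill out a discovery log page entry describing a listening RDMA address. */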
2236 static void
2237 spdk_nvmf_rdma_discover(struct spdk_nvmf_transport *transport,
2238 			struct spdk_nvme_transport_id *trid,
2239 			struct spdk_nvmf_discovery_log_page_entry *entry)
2240 {
2241 	entry->trtype = SPDK_NVMF_TRTYPE_RDMA;
2242 	entry->adrfam = trid->adrfam;
2243 	entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED;
2244 
2245 	spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' ');
2246 	spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' ');
2247 
2248 	entry->tsas.rdma.rdma_qptype = SPDK_NVMF_RDMA_QPTYPE_RELIABLE_CONNECTED;
2249 	entry->tsas.rdma.rdma_prtype = SPDK_NVMF_RDMA_PRTYPE_NONE;
2250 	entry->tsas.rdma.rdma_cms = SPDK_NVMF_RDMA_CMS_RDMA_CM;
2251 }
2252 
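/* Create a poll group containing one poller, and therefore one completion
 * queue, per RDMA device known to the transport. */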
2253 static struct spdk_nvmf_transport_poll_group *
2254 spdk_nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
2255 {
2256 	struct spdk_nvmf_rdma_transport		*rtransport;
2257 	struct spdk_nvmf_rdma_poll_group	*rgroup;
2258 	struct spdk_nvmf_rdma_poller		*poller;
2259 	struct spdk_nvmf_rdma_device		*device;
2260 
2261 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
2262 
2263 	rgroup = calloc(1, sizeof(*rgroup));
2264 	if (!rgroup) {
2265 		return NULL;
2266 	}
2267 
2268 	TAILQ_INIT(&rgroup->pollers);
2269 
2270 	pthread_mutex_lock(&rtransport->lock);
2271 	TAILQ_FOREACH(device, &rtransport->devices, link) {
2272 		poller = calloc(1, sizeof(*poller));
2273 		if (!poller) {
2274 			SPDK_ERRLOG("Unable to allocate memory for new RDMA poller\n");
2275 			free(rgroup);
2276 			pthread_mutex_unlock(&rtransport->lock);
2277 			return NULL;
2278 		}
2279 
2280 		poller->device = device;
2281 		poller->group = rgroup;
2282 
2283 		TAILQ_INIT(&poller->qpairs);
2284 
2285 		poller->cq = ibv_create_cq(device->context, NVMF_RDMA_CQ_SIZE, poller, NULL, 0);
2286 		if (!poller->cq) {
2287 			SPDK_ERRLOG("Unable to create completion queue\n");
2288 			free(poller);
2289 			free(rgroup);
2290 			pthread_mutex_unlock(&rtransport->lock);
2291 			return NULL;
2292 		}
2293 
2294 		TAILQ_INSERT_TAIL(&rgroup->pollers, poller, link);
2295 	}
2296 
2297 	pthread_mutex_unlock(&rtransport->lock);
2298 	return &rgroup->group;
2299 }
2300 
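/* Destroy a poll group: drain and destroy every qpair still attached to each
 * poller, destroy each poller's completion queue, and free the group. */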
2301 static void
2302 spdk_nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
2303 {
2304 	struct spdk_nvmf_rdma_poll_group	*rgroup;
2305 	struct spdk_nvmf_rdma_poller		*poller, *tmp;
2306 	struct spdk_nvmf_rdma_qpair		*qpair, *tmp_qpair;
2307 
2308 	rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);
2309 
2310 	if (!rgroup) {
2311 		return;
2312 	}
2313 
2314 	TAILQ_FOREACH_SAFE(poller, &rgroup->pollers, link, tmp) {
2315 		TAILQ_REMOVE(&rgroup->pollers, poller, link);
2316 
2317 		if (poller->cq) {
2318 			ibv_destroy_cq(poller->cq);
2319 		}
2320 		TAILQ_FOREACH_SAFE(qpair, &poller->qpairs, link, tmp_qpair) {
2321 			_spdk_nvmf_rdma_qp_cleanup_all_states(qpair);
2322 			spdk_nvmf_rdma_qpair_destroy(qpair);
2323 		}
2324 
2325 		free(poller);
2326 	}
2327 
2328 	free(rgroup);
2329 }
2330 
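/* Attach a newly connected qpair to the poller that matches its device,
 * allocate the qpair's RDMA resources, and accept the connection. On failure
 * the connection is rejected (best effort) and the qpair destroyed. */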
2331 static int
2332 spdk_nvmf_rdma_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
2333 			      struct spdk_nvmf_qpair *qpair)
2334 {
2335 	struct spdk_nvmf_rdma_transport		*rtransport;
2336 	struct spdk_nvmf_rdma_poll_group	*rgroup;
2337 	struct spdk_nvmf_rdma_qpair		*rqpair;
2338 	struct spdk_nvmf_rdma_device		*device;
2339 	struct spdk_nvmf_rdma_poller		*poller;
2340 	int					rc;
2341 
2342 	rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport);
2343 	rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);
2344 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
2345 
2346 	device = rqpair->port->device;
2347 
2348 	TAILQ_FOREACH(poller, &rgroup->pollers, link) {
2349 		if (poller->device == device) {
2350 			break;
2351 		}
2352 	}
2353 
2354 	if (!poller) {
2355 		SPDK_ERRLOG("No poller found for device.\n");
2356 		return -1;
2357 	}
2358 
2359 	TAILQ_INSERT_TAIL(&poller->qpairs, rqpair, link);
2360 	rqpair->poller = poller;
2361 
2362 	rc = spdk_nvmf_rdma_qpair_initialize(qpair);
2363 	if (rc < 0) {
2364 		SPDK_ERRLOG("Failed to initialize nvmf_rdma_qpair with qpair=%p\n", qpair);
2365 		return -1;
2366 	}
2367 
2368 	rqpair->mgmt_channel = spdk_get_io_channel(rtransport);
2369 	if (!rqpair->mgmt_channel) {
2370 		spdk_nvmf_rdma_event_reject(rqpair->cm_id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES);
2371 		spdk_nvmf_rdma_qpair_destroy(rqpair);
2372 		return -1;
2373 	}
2374 
2375 	rqpair->ch = spdk_io_channel_get_ctx(rqpair->mgmt_channel);
2376 	assert(rqpair->ch != NULL);
2377 
2378 	rc = spdk_nvmf_rdma_event_accept(rqpair->cm_id, rqpair);
2379 	if (rc) {
2380 		/* Try to reject, but we probably can't */
2381 		spdk_nvmf_rdma_event_reject(rqpair->cm_id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES);
2382 		spdk_nvmf_rdma_qpair_destroy(rqpair);
2383 		return -1;
2384 	}
2385 
2386 	spdk_nvmf_rdma_update_ibv_state(rqpair);
2387 
2388 	return 0;
2389 }
2390 
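/* Release a request without sending a response: return any pooled data
 * buffers and move the request back to the free state. */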
2391 static int
2392 spdk_nvmf_rdma_request_free(struct spdk_nvmf_request *req)
2393 {
2394 	struct spdk_nvmf_rdma_request	*rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
2395 	struct spdk_nvmf_rdma_transport	*rtransport = SPDK_CONTAINEROF(req->qpair->transport,
2396 			struct spdk_nvmf_rdma_transport, transport);
2397 
2398 	if (rdma_req->data_from_pool) {
2399 		/* Put the buffer(s) back in the pool */
2400 		for (uint32_t i = 0; i < rdma_req->req.iovcnt; i++) {
2401 			spdk_mempool_put(rtransport->data_buf_pool, rdma_req->data.buffers[i]);
2402 			rdma_req->req.iov[i].iov_base = NULL;
2403 			rdma_req->data.buffers[i] = NULL;
2404 		}
2405 		rdma_req->data_from_pool = false;
2406 	}
2407 	rdma_req->req.length = 0;
2408 	rdma_req->req.iovcnt = 0;
2409 	rdma_req->req.data = NULL;
2410 	spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_FREE);
2411 	return 0;
2412 }
2413 
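/* Completion callback from the generic NVMe-oF layer. If the ibv qp is still
 * healthy the request advances to the executed state and proceeds through the
 * normal response path; if the qp is in the ERROR state the request is
 * released immediately, and error recovery is started if the NVMe-oF layer
 * still considers the qpair active. */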
2414 static int
2415 spdk_nvmf_rdma_request_complete(struct spdk_nvmf_request *req)
2416 {
2417 	struct spdk_nvmf_rdma_transport	*rtransport = SPDK_CONTAINEROF(req->qpair->transport,
2418 			struct spdk_nvmf_rdma_transport, transport);
2419 	struct spdk_nvmf_rdma_request	*rdma_req = SPDK_CONTAINEROF(req,
2420 			struct spdk_nvmf_rdma_request, req);
2421 	struct spdk_nvmf_rdma_qpair     *rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair,
2422 			struct spdk_nvmf_rdma_qpair, qpair);
2423 
2424 	if (rqpair->ibv_attr.qp_state != IBV_QPS_ERR) {
2425 		/* The connection is alive, so process the request as normal */
2426 		spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_EXECUTED);
2427 	} else {
2428 		/* The connection is dead. Move the request directly to the completed state. */
2429 		spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED);
2430 	}
2431 
2432 	spdk_nvmf_rdma_request_process(rtransport, rdma_req);
2433 
2434 	if (rqpair->qpair.state == SPDK_NVMF_QPAIR_ACTIVE && rqpair->ibv_attr.qp_state == IBV_QPS_ERR) {
2435 		/* If the NVMe-oF layer thinks the connection is active, but the RDMA layer thinks
2436 		 * the connection is dead, perform error recovery. */
2437 		spdk_nvmf_rdma_qpair_recover(rqpair);
2438 	}
2439 
2440 	return 0;
2441 }
2442 
2443 static void
2444 spdk_nvmf_rdma_close_qpair(struct spdk_nvmf_qpair *qpair)
2445 {
2446 	struct spdk_nvmf_rdma_qpair *rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
2447 
2448 	spdk_nvmf_rdma_qpair_destroy(rqpair);
2449 }
2450 
2451 static struct spdk_nvmf_rdma_request *
2452 get_rdma_req_from_wc(struct ibv_wc *wc)
2453 {
2454 	struct spdk_nvmf_rdma_request *rdma_req;
2455 
2456 	rdma_req = (struct spdk_nvmf_rdma_request *)wc->wr_id;
2457 	assert(rdma_req != NULL);
2458 
2459 #ifdef DEBUG
2460 	struct spdk_nvmf_rdma_qpair *rqpair;
2461 	rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
2462 
2463 	assert(rdma_req - rqpair->reqs >= 0);
2464 	assert(rdma_req - rqpair->reqs < (ptrdiff_t)rqpair->max_queue_depth);
2465 #endif
2466 
2467 	return rdma_req;
2468 }
2469 
2470 static struct spdk_nvmf_rdma_recv *
2471 get_rdma_recv_from_wc(struct ibv_wc *wc)
2472 {
2473 	struct spdk_nvmf_rdma_recv *rdma_recv;
2474 
2475 	assert(wc->byte_len >= sizeof(struct spdk_nvmf_capsule_cmd));
2476 
2477 	rdma_recv = (struct spdk_nvmf_rdma_recv *)wc->wr_id;
2478 	assert(rdma_recv != NULL);
2479 
2480 #ifdef DEBUG
2481 	struct spdk_nvmf_rdma_qpair *rqpair = rdma_recv->qpair;
2482 
2483 	assert(rdma_recv - rqpair->recvs >= 0);
2484 	assert(rdma_recv - rqpair->recvs < (ptrdiff_t)rqpair->max_queue_depth);
2485 #endif
2486 
2487 	return rdma_recv;
2488 }
2489 
2490 #ifdef DEBUG
2491 static int
2492 spdk_nvmf_rdma_req_is_completing(struct spdk_nvmf_rdma_request *rdma_req)
2493 {
2494 	return rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST ||
2495 	       rdma_req->state == RDMA_REQUEST_STATE_COMPLETING;
2496 }
2497 #endif
2498 
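/* Poll one completion queue. Successful sends retire requests, completed RDMA
 * READs make requests ready to execute, and receives are queued as incoming
 * commands. Any completion with an error status forces the associated request
 * into the completed state and begins disconnecting the qpair. */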
2499 static int
2500 spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
2501 			   struct spdk_nvmf_rdma_poller *rpoller)
2502 {
2503 	struct ibv_wc wc[32];
2504 	struct spdk_nvmf_rdma_request	*rdma_req;
2505 	struct spdk_nvmf_rdma_recv	*rdma_recv;
2506 	struct spdk_nvmf_rdma_qpair	*rqpair;
2507 	int reaped, i;
2508 	int count = 0;
2509 	bool error = false;
2510 
2511 	/* Poll for completing operations. */
2512 	reaped = ibv_poll_cq(rpoller->cq, 32, wc);
2513 	if (reaped < 0) {
2514 		SPDK_ERRLOG("Error polling CQ! (%d): %s\n",
2515 			    errno, spdk_strerror(errno));
2516 		return -1;
2517 	}
2518 
2519 	for (i = 0; i < reaped; i++) {
2520 		/* Handle error conditions */
2521 		if (wc[i].status) {
2522 			SPDK_DEBUGLOG(SPDK_LOG_RDMA, "CQ error on CQ %p, Request 0x%lu (%d): %s\n",
2523 				      rpoller->cq, wc[i].wr_id, wc[i].status, ibv_wc_status_str(wc[i].status));
2524 			error = true;
2525 
2526 			switch (wc[i].opcode) {
2527 			case IBV_WC_SEND:
2528 			case IBV_WC_RDMA_WRITE:
2529 			case IBV_WC_RDMA_READ:
2530 				rdma_req = get_rdma_req_from_wc(&wc[i]);
2531 				rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
2532 
2533 				/* We're going to kill the connection, so force the request into
2534 				 * the completed state. */
2535 				spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED);
2536 				spdk_nvmf_rdma_request_process(rtransport, rdma_req);
2537 				break;
2538 			case IBV_WC_RECV:
2539 				rdma_recv = get_rdma_recv_from_wc(&wc[i]);
2540 				rqpair = rdma_recv->qpair;
2541 
2542 				/* Dump this into the incoming queue. This gets cleaned up when
2543 				 * the queue pair disconnects. */
2544 				TAILQ_INSERT_TAIL(&rqpair->incoming_queue, rdma_recv, link);
				break;
2545 			default:
2546 				SPDK_ERRLOG("Received an unknown opcode on the CQ: %d\n", wc[i].opcode);
2547 				continue;
2548 			}
2549 
2550 			/* Begin disconnecting the qpair. This is ok to call multiple times if lots of
2551 			 * errors occur on the same qpair in the same ibv_poll_cq batch. */
2552 			spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
2553 
2554 			continue;
2555 		}
2556 
2557 		switch (wc[i].opcode) {
2558 		case IBV_WC_SEND:
2559 			rdma_req = get_rdma_req_from_wc(&wc[i]);
2560 			rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
2561 
2562 			assert(spdk_nvmf_rdma_req_is_completing(rdma_req));
2563 
2564 			spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED);
2565 			spdk_nvmf_rdma_request_process(rtransport, rdma_req);
2566 
2567 			count++;
2568 
2569 			/* Try to process other queued requests */
2570 			spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair);
2571 			break;
2572 
2573 		case IBV_WC_RDMA_WRITE:
2574 			rdma_req = get_rdma_req_from_wc(&wc[i]);
2575 			rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
2576 
2577 			/* Try to process other queued requests */
2578 			spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair);
2579 			break;
2580 
2581 		case IBV_WC_RDMA_READ:
2582 			rdma_req = get_rdma_req_from_wc(&wc[i]);
2583 			rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
2584 
2585 			assert(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
2586 			spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_READY_TO_EXECUTE);
2587 			spdk_nvmf_rdma_request_process(rtransport, rdma_req);
2588 
2589 			/* Try to process other queued requests */
2590 			spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair);
2591 			break;
2592 
2593 		case IBV_WC_RECV:
2594 			rdma_recv = get_rdma_recv_from_wc(&wc[i]);
2595 			rqpair = rdma_recv->qpair;
2596 
2597 			TAILQ_INSERT_TAIL(&rqpair->incoming_queue, rdma_recv, link);
2598 			/* Try to process other queued requests */
2599 			spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair);
2600 			break;
2601 
2602 		default:
2603 			SPDK_ERRLOG("Received an unknown opcode on the CQ: %d\n", wc[i].opcode);
2604 			continue;
2605 		}
2606 	}
2607 
2608 	if (error == true) {
2609 		return -1;
2610 	}
2611 
2612 	return count;
2613 }
2614 
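/* Poll every poller (one per device) belonging to this poll group. */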
2615 static int
2616 spdk_nvmf_rdma_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
2617 {
2618 	struct spdk_nvmf_rdma_transport *rtransport;
2619 	struct spdk_nvmf_rdma_poll_group *rgroup;
2620 	struct spdk_nvmf_rdma_poller	*rpoller;
2621 	int				count, rc;
2622 
2623 	rtransport = SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_rdma_transport, transport);
2624 	rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);
2625 
2626 	count = 0;
2627 	TAILQ_FOREACH(rpoller, &rgroup->pollers, link) {
2628 		rc = spdk_nvmf_rdma_poller_poll(rtransport, rpoller);
2629 		if (rc < 0) {
2630 			return rc;
2631 		}
2632 		count += rc;
2633 	}
2634 
2635 	return count;
2636 }
2637 
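/* Operations table for the RDMA transport, referenced by the generic
 * NVMe-oF transport layer. */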
2638 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
2639 	.type = SPDK_NVME_TRANSPORT_RDMA,
2640 	.create = spdk_nvmf_rdma_create,
2641 	.destroy = spdk_nvmf_rdma_destroy,
2642 
2643 	.listen = spdk_nvmf_rdma_listen,
2644 	.stop_listen = spdk_nvmf_rdma_stop_listen,
2645 	.accept = spdk_nvmf_rdma_accept,
2646 
2647 	.listener_discover = spdk_nvmf_rdma_discover,
2648 
2649 	.poll_group_create = spdk_nvmf_rdma_poll_group_create,
2650 	.poll_group_destroy = spdk_nvmf_rdma_poll_group_destroy,
2651 	.poll_group_add = spdk_nvmf_rdma_poll_group_add,
2652 	.poll_group_poll = spdk_nvmf_rdma_poll_group_poll,
2653 
2654 	.req_free = spdk_nvmf_rdma_request_free,
2655 	.req_complete = spdk_nvmf_rdma_request_complete,
2656 
2657 	.qpair_fini = spdk_nvmf_rdma_close_qpair,
2658 	.qpair_is_idle = spdk_nvmf_rdma_qpair_is_idle,
2659 
2660 };
2661 
2662 SPDK_LOG_REGISTER_COMPONENT("rdma", SPDK_LOG_RDMA)
2663