xref: /spdk/lib/nvmf/rdma.c (revision 543d8b7b67d8e791511b123b89f02e9e6fcd6c1f)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation. All rights reserved.
5  *   Copyright (c) 2018 Mellanox Technologies LTD. All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include <infiniband/verbs.h>
37 #include <rdma/rdma_cma.h>
38 #include <rdma/rdma_verbs.h>
39 
40 #include "nvmf_internal.h"
41 #include "transport.h"
42 
43 #include "spdk/config.h"
44 #include "spdk/assert.h"
45 #include "spdk/thread.h"
46 #include "spdk/nvmf.h"
47 #include "spdk/nvmf_spec.h"
48 #include "spdk/string.h"
49 #include "spdk/trace.h"
50 #include "spdk/util.h"
51 
52 #include "spdk_internal/log.h"
53 
54 struct spdk_nvme_rdma_hooks g_nvmf_hooks = {};
55 
56 /*
57  RDMA Connection Resource Defaults
58  */
59 #define NVMF_DEFAULT_TX_SGE		SPDK_NVMF_MAX_SGL_ENTRIES
60 #define NVMF_DEFAULT_RSP_SGE		1
61 #define NVMF_DEFAULT_RX_SGE		2
62 
63 /* The RDMA completion queue size */
64 #define DEFAULT_NVMF_RDMA_CQ_SIZE	4096
65 #define MAX_WR_PER_QP(queue_depth)	(queue_depth * 3 + 2)
66 
67 /* Timeout for destroying defunct rqpairs */
68 #define NVMF_RDMA_QPAIR_DESTROY_TIMEOUT_US	4000000
69 
70 /* The maximum number of buffers per request */
71 #define NVMF_REQ_MAX_BUFFERS	(SPDK_NVMF_MAX_SGL_ENTRIES * 2)
72 
73 static int g_spdk_nvmf_ibv_query_mask =
74 	IBV_QP_STATE |
75 	IBV_QP_PKEY_INDEX |
76 	IBV_QP_PORT |
77 	IBV_QP_ACCESS_FLAGS |
78 	IBV_QP_AV |
79 	IBV_QP_PATH_MTU |
80 	IBV_QP_DEST_QPN |
81 	IBV_QP_RQ_PSN |
82 	IBV_QP_MAX_DEST_RD_ATOMIC |
83 	IBV_QP_MIN_RNR_TIMER |
84 	IBV_QP_SQ_PSN |
85 	IBV_QP_TIMEOUT |
86 	IBV_QP_RETRY_CNT |
87 	IBV_QP_RNR_RETRY |
88 	IBV_QP_MAX_QP_RD_ATOMIC;
89 
90 enum spdk_nvmf_rdma_request_state {
91 	/* The request is not currently in use */
92 	RDMA_REQUEST_STATE_FREE = 0,
93 
94 	/* Initial state when request first received */
95 	RDMA_REQUEST_STATE_NEW,
96 
97 	/* The request is queued until a data buffer is available. */
98 	RDMA_REQUEST_STATE_NEED_BUFFER,
99 
100 	/* The request is waiting on RDMA queue depth availability
101 	 * to transfer data from the host to the controller.
102 	 */
103 	RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING,
104 
105 	/* The request is currently transferring data from the host to the controller. */
106 	RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER,
107 
108 	/* The request is ready to execute at the block device */
109 	RDMA_REQUEST_STATE_READY_TO_EXECUTE,
110 
111 	/* The request is currently executing at the block device */
112 	RDMA_REQUEST_STATE_EXECUTING,
113 
114 	/* The request finished executing at the block device */
115 	RDMA_REQUEST_STATE_EXECUTED,
116 
117 	/* The request is waiting on RDMA queue depth availability
118 	 * to transfer data from the controller to the host.
119 	 */
120 	RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING,
121 
122 	/* The request is ready to send a completion */
123 	RDMA_REQUEST_STATE_READY_TO_COMPLETE,
124 
125 	/* The request is currently transferring data from the controller to the host. */
126 	RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST,
127 
128 	/* The request currently has an outstanding completion without an
129 	 * associated data transfer.
130 	 */
131 	RDMA_REQUEST_STATE_COMPLETING,
132 
133 	/* The request completed and can be marked free. */
134 	RDMA_REQUEST_STATE_COMPLETED,
135 
136 	/* Terminator */
137 	RDMA_REQUEST_NUM_STATES,
138 };
139 
140 #define OBJECT_NVMF_RDMA_IO				0x40
141 
142 #define TRACE_GROUP_NVMF_RDMA				0x4
143 #define TRACE_RDMA_REQUEST_STATE_NEW					SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x0)
144 #define TRACE_RDMA_REQUEST_STATE_NEED_BUFFER				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x1)
145 #define TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING	SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x2)
146 #define TRACE_RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER	SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x3)
147 #define TRACE_RDMA_REQUEST_STATE_READY_TO_EXECUTE			SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x4)
148 #define TRACE_RDMA_REQUEST_STATE_EXECUTING				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x5)
149 #define TRACE_RDMA_REQUEST_STATE_EXECUTED				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x6)
150 #define TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING		SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x7)
151 #define TRACE_RDMA_REQUEST_STATE_READY_TO_COMPLETE			SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x8)
152 #define TRACE_RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST	SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x9)
153 #define TRACE_RDMA_REQUEST_STATE_COMPLETING				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xA)
154 #define TRACE_RDMA_REQUEST_STATE_COMPLETED				SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xB)
155 #define TRACE_RDMA_QP_CREATE						SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xC)
156 #define TRACE_RDMA_IBV_ASYNC_EVENT					SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xD)
157 #define TRACE_RDMA_CM_ASYNC_EVENT					SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xE)
158 #define TRACE_RDMA_QP_STATE_CHANGE					SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0xF)
159 #define TRACE_RDMA_QP_DISCONNECT					SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x10)
160 #define TRACE_RDMA_QP_DESTROY						SPDK_TPOINT_ID(TRACE_GROUP_NVMF_RDMA, 0x11)
161 
162 SPDK_TRACE_REGISTER_FN(nvmf_trace, "nvmf_rdma", TRACE_GROUP_NVMF_RDMA)
163 {
164 	spdk_trace_register_object(OBJECT_NVMF_RDMA_IO, 'r');
165 	spdk_trace_register_description("RDMA_REQ_NEW", TRACE_RDMA_REQUEST_STATE_NEW,
166 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 1, 1, "cmid:   ");
167 	spdk_trace_register_description("RDMA_REQ_NEED_BUFFER", TRACE_RDMA_REQUEST_STATE_NEED_BUFFER,
168 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
169 	spdk_trace_register_description("RDMA_REQ_TX_PENDING_C2H",
170 					TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING,
171 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
172 	spdk_trace_register_description("RDMA_REQ_TX_PENDING_H2C",
173 					TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING,
174 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
175 	spdk_trace_register_description("RDMA_REQ_TX_H2C",
176 					TRACE_RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER,
177 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
178 	spdk_trace_register_description("RDMA_REQ_RDY_TO_EXECUTE",
179 					TRACE_RDMA_REQUEST_STATE_READY_TO_EXECUTE,
180 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
181 	spdk_trace_register_description("RDMA_REQ_EXECUTING",
182 					TRACE_RDMA_REQUEST_STATE_EXECUTING,
183 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
184 	spdk_trace_register_description("RDMA_REQ_EXECUTED",
185 					TRACE_RDMA_REQUEST_STATE_EXECUTED,
186 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
187 	spdk_trace_register_description("RDMA_REQ_RDY_TO_COMPL",
188 					TRACE_RDMA_REQUEST_STATE_READY_TO_COMPLETE,
189 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
190 	spdk_trace_register_description("RDMA_REQ_COMPLETING_C2H",
191 					TRACE_RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST,
192 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
193 	spdk_trace_register_description("RDMA_REQ_COMPLETING",
194 					TRACE_RDMA_REQUEST_STATE_COMPLETING,
195 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
196 	spdk_trace_register_description("RDMA_REQ_COMPLETED",
197 					TRACE_RDMA_REQUEST_STATE_COMPLETED,
198 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0, 1, "cmid:   ");
199 
200 	spdk_trace_register_description("RDMA_QP_CREATE", TRACE_RDMA_QP_CREATE,
201 					OWNER_NONE, OBJECT_NONE, 0, 0, "");
202 	spdk_trace_register_description("RDMA_IBV_ASYNC_EVENT", TRACE_RDMA_IBV_ASYNC_EVENT,
203 					OWNER_NONE, OBJECT_NONE, 0, 0, "type:   ");
204 	spdk_trace_register_description("RDMA_CM_ASYNC_EVENT", TRACE_RDMA_CM_ASYNC_EVENT,
205 					OWNER_NONE, OBJECT_NONE, 0, 0, "type:   ");
206 	spdk_trace_register_description("RDMA_QP_STATE_CHANGE", TRACE_RDMA_QP_STATE_CHANGE,
207 					OWNER_NONE, OBJECT_NONE, 0, 1, "state:  ");
208 	spdk_trace_register_description("RDMA_QP_DISCONNECT", TRACE_RDMA_QP_DISCONNECT,
209 					OWNER_NONE, OBJECT_NONE, 0, 0, "");
210 	spdk_trace_register_description("RDMA_QP_DESTROY", TRACE_RDMA_QP_DESTROY,
211 					OWNER_NONE, OBJECT_NONE, 0, 0, "");
212 }
213 
214 enum spdk_nvmf_rdma_wr_type {
215 	RDMA_WR_TYPE_RECV,
216 	RDMA_WR_TYPE_SEND,
217 	RDMA_WR_TYPE_DATA,
218 };
219 
220 struct spdk_nvmf_rdma_wr {
221 	enum spdk_nvmf_rdma_wr_type	type;
222 };
223 
224 /* This structure holds commands as they are received off the wire.
225  * It must be dynamically paired with a full request object
226  * (spdk_nvmf_rdma_request) to service a request. It is separate
227  * from the request because RDMA does not appear to order
228  * completions, so occasionally we'll get a new incoming
229  * command when there aren't any free request objects.
230  */
231 struct spdk_nvmf_rdma_recv {
232 	struct ibv_recv_wr			wr;
233 	struct ibv_sge				sgl[NVMF_DEFAULT_RX_SGE];
234 
235 	struct spdk_nvmf_rdma_qpair		*qpair;
236 
237 	/* In-capsule data buffer */
238 	uint8_t					*buf;
239 
240 	struct spdk_nvmf_rdma_wr		rdma_wr;
241 
242 	STAILQ_ENTRY(spdk_nvmf_rdma_recv)	link;
243 };
244 
245 struct spdk_nvmf_rdma_request_data {
246 	struct spdk_nvmf_rdma_wr	rdma_wr;
247 	struct ibv_send_wr		wr;
248 	struct ibv_sge			sgl[SPDK_NVMF_MAX_SGL_ENTRIES];
249 };
250 
251 struct spdk_nvmf_rdma_request {
252 	struct spdk_nvmf_request		req;
253 	bool					data_from_pool;
254 
255 	enum spdk_nvmf_rdma_request_state	state;
256 
257 	struct spdk_nvmf_rdma_recv		*recv;
258 
259 	struct {
260 		struct spdk_nvmf_rdma_wr	rdma_wr;
261 		struct	ibv_send_wr		wr;
262 		struct	ibv_sge			sgl[NVMF_DEFAULT_RSP_SGE];
263 	} rsp;
264 
265 	struct spdk_nvmf_rdma_request_data	data;
266 	void					*buffers[NVMF_REQ_MAX_BUFFERS];
267 
268 	uint32_t				num_outstanding_data_wr;
269 
270 	STAILQ_ENTRY(spdk_nvmf_rdma_request)	state_link;
271 };
272 
273 enum spdk_nvmf_rdma_qpair_disconnect_flags {
274 	RDMA_QP_DISCONNECTING		= 1,
275 	RDMA_QP_RECV_DRAINED		= 1 << 1,
276 	RDMA_QP_SEND_DRAINED		= 1 << 2
277 };
278 
279 struct spdk_nvmf_rdma_resource_opts {
280 	struct spdk_nvmf_rdma_qpair	*qpair;
281 	/* qp points either to an ibv_qp object or an ibv_srq object depending on the value of shared. */
282 	void				*qp;
283 	struct ibv_pd			*pd;
284 	uint32_t			max_queue_depth;
285 	uint32_t			in_capsule_data_size;
286 	bool				shared;
287 };
288 
289 struct spdk_nvmf_send_wr_list {
290 	struct ibv_send_wr	*first;
291 	struct ibv_send_wr	*last;
292 };
293 
294 struct spdk_nvmf_recv_wr_list {
295 	struct ibv_recv_wr	*first;
296 	struct ibv_recv_wr	*last;
297 };
298 
299 struct spdk_nvmf_rdma_resources {
300 	/* Array of size "max_queue_depth" containing RDMA requests. */
301 	struct spdk_nvmf_rdma_request		*reqs;
302 
303 	/* Array of size "max_queue_depth" containing RDMA recvs. */
304 	struct spdk_nvmf_rdma_recv		*recvs;
305 
306 	/* Array of size "max_queue_depth" containing 64 byte capsules
307 	 * used for receive.
308 	 */
309 	union nvmf_h2c_msg			*cmds;
310 	struct ibv_mr				*cmds_mr;
311 
312 	/* Array of size "max_queue_depth" containing 16 byte completions
313 	 * to be sent back to the user.
314 	 */
315 	union nvmf_c2h_msg			*cpls;
316 	struct ibv_mr				*cpls_mr;
317 
318 	/* Array of size "max_queue_depth * InCapsuleDataSize" containing
319 	 * buffers to be used for in capsule data.
320 	 */
321 	void					*bufs;
322 	struct ibv_mr				*bufs_mr;
323 
324 	/* The list of pending recvs to transfer */
325 	struct spdk_nvmf_recv_wr_list		recvs_to_post;
326 
327 	/* Receives that are waiting for a request object */
328 	STAILQ_HEAD(, spdk_nvmf_rdma_recv)	incoming_queue;
329 
330 	/* Queue to track free requests */
331 	STAILQ_HEAD(, spdk_nvmf_rdma_request)	free_queue;
332 };
333 
334 struct spdk_nvmf_rdma_qpair {
335 	struct spdk_nvmf_qpair			qpair;
336 
337 	struct spdk_nvmf_rdma_port		*port;
338 	struct spdk_nvmf_rdma_poller		*poller;
339 
340 	struct rdma_cm_id			*cm_id;
341 	struct ibv_srq				*srq;
342 	struct rdma_cm_id			*listen_id;
343 
344 	/* The maximum number of I/O outstanding on this connection at one time */
345 	uint16_t				max_queue_depth;
346 
347 	/* The maximum number of active RDMA READ and ATOMIC operations at one time */
348 	uint16_t				max_read_depth;
349 
350 	/* The maximum number of RDMA SEND operations at one time */
351 	uint32_t				max_send_depth;
352 
353 	/* The current number of outstanding WRs from this qpair's
354 	 * recv queue. Should not exceed device->attr.max_queue_depth.
355 	 */
356 	uint16_t				current_recv_depth;
357 
358 	/* The current number of active RDMA READ operations */
359 	uint16_t				current_read_depth;
360 
361 	/* The current number of posted WRs from this qpair's
362 	 * send queue. Should not exceed max_send_depth.
363 	 */
364 	uint32_t				current_send_depth;
365 
366 	/* The maximum number of SGEs per WR on the send queue */
367 	uint32_t				max_send_sge;
368 
369 	/* The maximum number of SGEs per WR on the recv queue */
370 	uint32_t				max_recv_sge;
371 
372 	/* The list of pending send requests for a transfer */
373 	struct spdk_nvmf_send_wr_list		sends_to_post;
374 
375 	struct spdk_nvmf_rdma_resources		*resources;
376 
377 	STAILQ_HEAD(, spdk_nvmf_rdma_request)	pending_rdma_read_queue;
378 
379 	STAILQ_HEAD(, spdk_nvmf_rdma_request)	pending_rdma_write_queue;
380 
381 	/* Number of requests not in the free state */
382 	uint32_t				qd;
383 
384 	TAILQ_ENTRY(spdk_nvmf_rdma_qpair)	link;
385 
386 	STAILQ_ENTRY(spdk_nvmf_rdma_qpair)	recv_link;
387 
388 	STAILQ_ENTRY(spdk_nvmf_rdma_qpair)	send_link;
389 
390 	/* IBV queue pair attributes: they are used to manage
391 	 * qp state and recover from errors.
392 	 */
393 	enum ibv_qp_state			ibv_state;
394 
395 	uint32_t				disconnect_flags;
396 
397 	/* Poller registered in case the qpair doesn't properly
398 	 * complete the qpair destruct process and becomes defunct.
399 	 */
400 
401 	struct spdk_poller			*destruct_poller;
402 
403 	/* There are several ways a disconnect can start on a qpair
404 	 * and they are not all mutually exclusive. It is important
405 	 * that we only initialize one of these paths.
406 	 */
407 	bool					disconnect_started;
408 	/* Lets us know that we have received the last_wqe event. */
409 	bool					last_wqe_reached;
410 };
411 
412 struct spdk_nvmf_rdma_poller {
413 	struct spdk_nvmf_rdma_device		*device;
414 	struct spdk_nvmf_rdma_poll_group	*group;
415 
416 	int					num_cqe;
417 	int					required_num_wr;
418 	struct ibv_cq				*cq;
419 
420 	/* The maximum number of I/O outstanding on the shared receive queue at one time */
421 	uint16_t				max_srq_depth;
422 
423 	/* Shared receive queue */
424 	struct ibv_srq				*srq;
425 
426 	struct spdk_nvmf_rdma_resources		*resources;
427 
428 	TAILQ_HEAD(, spdk_nvmf_rdma_qpair)	qpairs;
429 
430 	STAILQ_HEAD(, spdk_nvmf_rdma_qpair)	qpairs_pending_recv;
431 
432 	STAILQ_HEAD(, spdk_nvmf_rdma_qpair)	qpairs_pending_send;
433 
434 	TAILQ_ENTRY(spdk_nvmf_rdma_poller)	link;
435 };
436 
437 struct spdk_nvmf_rdma_poll_group {
438 	struct spdk_nvmf_transport_poll_group	group;
439 
440 	/* Requests that are waiting to obtain a data buffer */
441 	STAILQ_HEAD(, spdk_nvmf_rdma_request)	pending_data_buf_queue;
442 
443 	TAILQ_HEAD(, spdk_nvmf_rdma_poller)	pollers;
444 };
445 
446 /* Assuming rdma_cm uses just one protection domain per ibv_context. */
447 struct spdk_nvmf_rdma_device {
448 	struct ibv_device_attr			attr;
449 	struct ibv_context			*context;
450 
451 	struct spdk_mem_map			*map;
452 	struct ibv_pd				*pd;
453 
454 	int					num_srq;
455 
456 	TAILQ_ENTRY(spdk_nvmf_rdma_device)	link;
457 };
458 
459 struct spdk_nvmf_rdma_port {
460 	struct spdk_nvme_transport_id		trid;
461 	struct rdma_cm_id			*id;
462 	struct spdk_nvmf_rdma_device		*device;
463 	uint32_t				ref;
464 	TAILQ_ENTRY(spdk_nvmf_rdma_port)	link;
465 };
466 
467 struct spdk_nvmf_rdma_transport {
468 	struct spdk_nvmf_transport	transport;
469 
470 	struct rdma_event_channel	*event_channel;
471 
472 	struct spdk_mempool		*data_wr_pool;
473 
474 	pthread_mutex_t			lock;
475 
476 	/* fields used to poll RDMA/IB events */
477 	nfds_t			npoll_fds;
478 	struct pollfd		*poll_fds;
479 
480 	TAILQ_HEAD(, spdk_nvmf_rdma_device)	devices;
481 	TAILQ_HEAD(, spdk_nvmf_rdma_port)	ports;
482 };
483 
484 static inline int
485 spdk_nvmf_rdma_check_ibv_state(enum ibv_qp_state state)
486 {
487 	switch (state) {
488 	case IBV_QPS_RESET:
489 	case IBV_QPS_INIT:
490 	case IBV_QPS_RTR:
491 	case IBV_QPS_RTS:
492 	case IBV_QPS_SQD:
493 	case IBV_QPS_SQE:
494 	case IBV_QPS_ERR:
495 		return 0;
496 	default:
497 		return -1;
498 	}
499 }
500 
501 static enum ibv_qp_state
502 spdk_nvmf_rdma_update_ibv_state(struct spdk_nvmf_rdma_qpair *rqpair) {
503 	enum ibv_qp_state old_state, new_state;
504 	struct ibv_qp_attr qp_attr;
505 	struct ibv_qp_init_attr init_attr;
506 	int rc;
507 
508 	old_state = rqpair->ibv_state;
509 	rc = ibv_query_qp(rqpair->cm_id->qp, &qp_attr,
510 			  g_spdk_nvmf_ibv_query_mask, &init_attr);
511 
512 	if (rc)
513 	{
514 		SPDK_ERRLOG("Failed to get updated RDMA queue pair state!\n");
515 		return IBV_QPS_ERR + 1;
516 	}
517 
518 	new_state = qp_attr.qp_state;
519 	rqpair->ibv_state = new_state;
520 	qp_attr.ah_attr.port_num = qp_attr.port_num;
521 
522 	rc = spdk_nvmf_rdma_check_ibv_state(new_state);
523 	if (rc)
524 	{
525 		SPDK_ERRLOG("QP#%d: bad state updated: %u, maybe hardware issue\n", rqpair->qpair.qid, new_state);
526 		/*
527 		 * IBV_QPS_UNKNOWN undefined if lib version smaller than libibverbs-1.1.8
528 		 * IBV_QPS_UNKNOWN is the enum element after IBV_QPS_ERR
529 		 */
530 		return IBV_QPS_ERR + 1;
531 	}
532 
533 	if (old_state != new_state)
534 	{
535 		spdk_trace_record(TRACE_RDMA_QP_STATE_CHANGE, 0, 0,
536 				  (uintptr_t)rqpair->cm_id, new_state);
537 	}
538 	return new_state;
539 }
540 
541 static const char *str_ibv_qp_state[] = {
542 	"IBV_QPS_RESET",
543 	"IBV_QPS_INIT",
544 	"IBV_QPS_RTR",
545 	"IBV_QPS_RTS",
546 	"IBV_QPS_SQD",
547 	"IBV_QPS_SQE",
548 	"IBV_QPS_ERR",
549 	"IBV_QPS_UNKNOWN"
550 };
551 
552 static int
553 spdk_nvmf_rdma_set_ibv_state(struct spdk_nvmf_rdma_qpair *rqpair,
554 			     enum ibv_qp_state new_state)
555 {
556 	struct ibv_qp_attr qp_attr;
557 	struct ibv_qp_init_attr init_attr;
558 	int rc;
559 	enum ibv_qp_state state;
560 	static int attr_mask_rc[] = {
561 		[IBV_QPS_RESET] = IBV_QP_STATE,
562 		[IBV_QPS_INIT] = (IBV_QP_STATE |
563 				  IBV_QP_PKEY_INDEX |
564 				  IBV_QP_PORT |
565 				  IBV_QP_ACCESS_FLAGS),
566 		[IBV_QPS_RTR] = (IBV_QP_STATE |
567 				 IBV_QP_AV |
568 				 IBV_QP_PATH_MTU |
569 				 IBV_QP_DEST_QPN |
570 				 IBV_QP_RQ_PSN |
571 				 IBV_QP_MAX_DEST_RD_ATOMIC |
572 				 IBV_QP_MIN_RNR_TIMER),
573 		[IBV_QPS_RTS] = (IBV_QP_STATE |
574 				 IBV_QP_SQ_PSN |
575 				 IBV_QP_TIMEOUT |
576 				 IBV_QP_RETRY_CNT |
577 				 IBV_QP_RNR_RETRY |
578 				 IBV_QP_MAX_QP_RD_ATOMIC),
579 		[IBV_QPS_SQD] = IBV_QP_STATE,
580 		[IBV_QPS_SQE] = IBV_QP_STATE,
581 		[IBV_QPS_ERR] = IBV_QP_STATE,
582 	};
583 
584 	rc = spdk_nvmf_rdma_check_ibv_state(new_state);
585 	if (rc) {
586 		SPDK_ERRLOG("QP#%d: bad state requested: %u\n",
587 			    rqpair->qpair.qid, new_state);
588 		return rc;
589 	}
590 
591 	rc = ibv_query_qp(rqpair->cm_id->qp, &qp_attr,
592 			  g_spdk_nvmf_ibv_query_mask, &init_attr);
593 
594 	if (rc) {
595 		SPDK_ERRLOG("Failed to get updated RDMA queue pair state!\n");
596 		assert(false);
597 	}
598 
599 	qp_attr.cur_qp_state = rqpair->ibv_state;
600 	qp_attr.qp_state = new_state;
601 
602 	rc = ibv_modify_qp(rqpair->cm_id->qp, &qp_attr,
603 			   attr_mask_rc[new_state]);
604 
605 	if (rc) {
606 		SPDK_ERRLOG("QP#%d: failed to set state to: %s, %d (%s)\n",
607 			    rqpair->qpair.qid, str_ibv_qp_state[new_state], errno, strerror(errno));
608 		return rc;
609 	}
610 
611 	state = spdk_nvmf_rdma_update_ibv_state(rqpair);
612 
613 	if (state != new_state) {
614 		SPDK_ERRLOG("QP#%d: expected state: %s, actual state: %s\n",
615 			    rqpair->qpair.qid, str_ibv_qp_state[new_state],
616 			    str_ibv_qp_state[state]);
617 		return -1;
618 	}
619 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "IBV QP#%u changed to: %s\n", rqpair->qpair.qid,
620 		      str_ibv_qp_state[state]);
621 	return 0;
622 }
623 
624 static void
625 nvmf_rdma_request_free_data(struct spdk_nvmf_rdma_request *rdma_req,
626 			    struct spdk_nvmf_rdma_transport *rtransport)
627 {
628 	struct spdk_nvmf_rdma_request_data	*current_data_wr = NULL, *next_data_wr = NULL;
629 	struct ibv_send_wr			*send_wr;
630 	int					i;
631 
632 	rdma_req->num_outstanding_data_wr = 0;
633 	current_data_wr = &rdma_req->data;
634 	for (i = 0; i < current_data_wr->wr.num_sge; i++) {
635 		current_data_wr->wr.sg_list[i].addr = 0;
636 		current_data_wr->wr.sg_list[i].length = 0;
637 		current_data_wr->wr.sg_list[i].lkey = 0;
638 	}
639 	current_data_wr->wr.num_sge = 0;
640 
641 	send_wr = current_data_wr->wr.next;
642 	if (send_wr != NULL && send_wr != &rdma_req->rsp.wr) {
643 		next_data_wr = SPDK_CONTAINEROF(send_wr, struct spdk_nvmf_rdma_request_data, wr);
644 	}
645 	while (next_data_wr) {
646 		current_data_wr = next_data_wr;
647 		send_wr = current_data_wr->wr.next;
648 		if (send_wr != NULL && send_wr != &rdma_req->rsp.wr &&
649 		    send_wr->wr_id == current_data_wr->wr.wr_id) {
650 			next_data_wr = SPDK_CONTAINEROF(send_wr, struct spdk_nvmf_rdma_request_data, wr);
651 		} else {
652 			next_data_wr = NULL;
653 		}
654 
655 		for (i = 0; i < current_data_wr->wr.num_sge; i++) {
656 			current_data_wr->wr.sg_list[i].addr = 0;
657 			current_data_wr->wr.sg_list[i].length = 0;
658 			current_data_wr->wr.sg_list[i].lkey = 0;
659 		}
660 		current_data_wr->wr.num_sge = 0;
661 		spdk_mempool_put(rtransport->data_wr_pool, current_data_wr);
662 	}
663 }
664 
665 static void
666 nvmf_rdma_dump_request(struct spdk_nvmf_rdma_request *req)
667 {
668 	SPDK_ERRLOG("\t\tRequest Data From Pool: %d\n", req->data_from_pool);
669 	if (req->req.cmd) {
670 		SPDK_ERRLOG("\t\tRequest opcode: %d\n", req->req.cmd->nvmf_cmd.opcode);
671 	}
672 	if (req->recv) {
673 		SPDK_ERRLOG("\t\tRequest recv wr_id%lu\n", req->recv->wr.wr_id);
674 	}
675 }
676 
677 static void
678 nvmf_rdma_dump_qpair_contents(struct spdk_nvmf_rdma_qpair *rqpair)
679 {
680 	int i;
681 
682 	SPDK_ERRLOG("Dumping contents of queue pair (QID %d)\n", rqpair->qpair.qid);
683 	for (i = 0; i < rqpair->max_queue_depth; i++) {
684 		if (rqpair->resources->reqs[i].state != RDMA_REQUEST_STATE_FREE) {
685 			nvmf_rdma_dump_request(&rqpair->resources->reqs[i]);
686 		}
687 	}
688 }
689 
690 static void
691 nvmf_rdma_resources_destroy(struct spdk_nvmf_rdma_resources *resources)
692 {
693 	if (resources->cmds_mr) {
694 		ibv_dereg_mr(resources->cmds_mr);
695 	}
696 
697 	if (resources->cpls_mr) {
698 		ibv_dereg_mr(resources->cpls_mr);
699 	}
700 
701 	if (resources->bufs_mr) {
702 		ibv_dereg_mr(resources->bufs_mr);
703 	}
704 
705 	spdk_dma_free(resources->cmds);
706 	spdk_dma_free(resources->cpls);
707 	spdk_dma_free(resources->bufs);
708 	free(resources->reqs);
709 	free(resources->recvs);
710 	free(resources);
711 }
712 
713 
714 static struct spdk_nvmf_rdma_resources *
715 nvmf_rdma_resources_create(struct spdk_nvmf_rdma_resource_opts *opts)
716 {
717 	struct spdk_nvmf_rdma_resources	*resources;
718 	struct spdk_nvmf_rdma_request	*rdma_req;
719 	struct spdk_nvmf_rdma_recv	*rdma_recv;
720 	struct ibv_qp			*qp;
721 	struct ibv_srq			*srq;
722 	uint32_t			i;
723 	int				rc;
724 
725 	resources = calloc(1, sizeof(struct spdk_nvmf_rdma_resources));
726 	if (!resources) {
727 		SPDK_ERRLOG("Unable to allocate resources for receive queue.\n");
728 		return NULL;
729 	}
730 
731 	resources->reqs = calloc(opts->max_queue_depth, sizeof(*resources->reqs));
732 	resources->recvs = calloc(opts->max_queue_depth, sizeof(*resources->recvs));
733 	resources->cmds = spdk_dma_zmalloc(opts->max_queue_depth * sizeof(*resources->cmds),
734 					   0x1000, NULL);
735 	resources->cpls = spdk_dma_zmalloc(opts->max_queue_depth * sizeof(*resources->cpls),
736 					   0x1000, NULL);
737 
738 	if (opts->in_capsule_data_size > 0) {
739 		resources->bufs = spdk_dma_zmalloc(opts->max_queue_depth *
740 						   opts->in_capsule_data_size,
741 						   0x1000, NULL);
742 	}
743 
744 	if (!resources->reqs || !resources->recvs || !resources->cmds ||
745 	    !resources->cpls || (opts->in_capsule_data_size && !resources->bufs)) {
746 		SPDK_ERRLOG("Unable to allocate sufficient memory for RDMA queue.\n");
747 		goto cleanup;
748 	}
749 
750 	resources->cmds_mr = ibv_reg_mr(opts->pd, resources->cmds,
751 					opts->max_queue_depth * sizeof(*resources->cmds),
752 					IBV_ACCESS_LOCAL_WRITE);
753 	resources->cpls_mr = ibv_reg_mr(opts->pd, resources->cpls,
754 					opts->max_queue_depth * sizeof(*resources->cpls),
755 					0);
756 
757 	if (opts->in_capsule_data_size) {
758 		resources->bufs_mr = ibv_reg_mr(opts->pd, resources->bufs,
759 						opts->max_queue_depth *
760 						opts->in_capsule_data_size,
761 						IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
762 	}
763 
764 	if (!resources->cmds_mr || !resources->cpls_mr ||
765 	    (opts->in_capsule_data_size &&
766 	     !resources->bufs_mr)) {
767 		goto cleanup;
768 	}
769 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Command Array: %p Length: %lx LKey: %x\n",
770 		      resources->cmds, opts->max_queue_depth * sizeof(*resources->cmds),
771 		      resources->cmds_mr->lkey);
772 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Completion Array: %p Length: %lx LKey: %x\n",
773 		      resources->cpls, opts->max_queue_depth * sizeof(*resources->cpls),
774 		      resources->cpls_mr->lkey);
775 	if (resources->bufs && resources->bufs_mr) {
776 		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "In Capsule Data Array: %p Length: %x LKey: %x\n",
777 			      resources->bufs, opts->max_queue_depth *
778 			      opts->in_capsule_data_size, resources->bufs_mr->lkey);
779 	}
780 
781 	/* Initialize queues */
782 	STAILQ_INIT(&resources->incoming_queue);
783 	STAILQ_INIT(&resources->free_queue);
784 
785 	for (i = 0; i < opts->max_queue_depth; i++) {
786 		struct ibv_recv_wr *bad_wr = NULL;
787 
788 		rdma_recv = &resources->recvs[i];
789 		rdma_recv->qpair = opts->qpair;
790 
791 		/* Set up memory to receive commands */
792 		if (resources->bufs) {
793 			rdma_recv->buf = (void *)((uintptr_t)resources->bufs + (i *
794 						  opts->in_capsule_data_size));
795 		}
796 
797 		rdma_recv->rdma_wr.type = RDMA_WR_TYPE_RECV;
798 
799 		rdma_recv->sgl[0].addr = (uintptr_t)&resources->cmds[i];
800 		rdma_recv->sgl[0].length = sizeof(resources->cmds[i]);
801 		rdma_recv->sgl[0].lkey = resources->cmds_mr->lkey;
802 		rdma_recv->wr.num_sge = 1;
803 
804 		if (rdma_recv->buf && resources->bufs_mr) {
805 			rdma_recv->sgl[1].addr = (uintptr_t)rdma_recv->buf;
806 			rdma_recv->sgl[1].length = opts->in_capsule_data_size;
807 			rdma_recv->sgl[1].lkey = resources->bufs_mr->lkey;
808 			rdma_recv->wr.num_sge++;
809 		}
810 
811 		rdma_recv->wr.wr_id = (uintptr_t)&rdma_recv->rdma_wr;
812 		rdma_recv->wr.sg_list = rdma_recv->sgl;
813 		if (opts->shared) {
814 			srq = (struct ibv_srq *)opts->qp;
815 			rc = ibv_post_srq_recv(srq, &rdma_recv->wr, &bad_wr);
816 		} else {
817 			qp = (struct ibv_qp *)opts->qp;
818 			rc = ibv_post_recv(qp, &rdma_recv->wr, &bad_wr);
819 		}
820 		if (rc) {
821 			goto cleanup;
822 		}
823 	}
824 
825 	for (i = 0; i < opts->max_queue_depth; i++) {
826 		rdma_req = &resources->reqs[i];
827 
828 		if (opts->qpair != NULL) {
829 			rdma_req->req.qpair = &opts->qpair->qpair;
830 		} else {
831 			rdma_req->req.qpair = NULL;
832 		}
833 		rdma_req->req.cmd = NULL;
834 
835 		/* Set up memory to send responses */
836 		rdma_req->req.rsp = &resources->cpls[i];
837 
838 		rdma_req->rsp.sgl[0].addr = (uintptr_t)&resources->cpls[i];
839 		rdma_req->rsp.sgl[0].length = sizeof(resources->cpls[i]);
840 		rdma_req->rsp.sgl[0].lkey = resources->cpls_mr->lkey;
841 
842 		rdma_req->rsp.rdma_wr.type = RDMA_WR_TYPE_SEND;
843 		rdma_req->rsp.wr.wr_id = (uintptr_t)&rdma_req->rsp.rdma_wr;
844 		rdma_req->rsp.wr.next = NULL;
845 		rdma_req->rsp.wr.opcode = IBV_WR_SEND;
846 		rdma_req->rsp.wr.send_flags = IBV_SEND_SIGNALED;
847 		rdma_req->rsp.wr.sg_list = rdma_req->rsp.sgl;
848 		rdma_req->rsp.wr.num_sge = SPDK_COUNTOF(rdma_req->rsp.sgl);
849 
850 		/* Set up memory for data buffers */
851 		rdma_req->data.rdma_wr.type = RDMA_WR_TYPE_DATA;
852 		rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
853 		rdma_req->data.wr.next = NULL;
854 		rdma_req->data.wr.send_flags = IBV_SEND_SIGNALED;
855 		rdma_req->data.wr.sg_list = rdma_req->data.sgl;
856 		rdma_req->data.wr.num_sge = SPDK_COUNTOF(rdma_req->data.sgl);
857 
858 		/* Initialize request state to FREE */
859 		rdma_req->state = RDMA_REQUEST_STATE_FREE;
860 		STAILQ_INSERT_TAIL(&resources->free_queue, rdma_req, state_link);
861 	}
862 
863 	return resources;
864 
865 cleanup:
866 	nvmf_rdma_resources_destroy(resources);
867 	return NULL;
868 }
869 
870 static void
871 spdk_nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rqpair)
872 {
873 	struct spdk_nvmf_rdma_recv	*rdma_recv, *recv_tmp;
874 	struct ibv_recv_wr		*bad_recv_wr = NULL;
875 	int				rc;
876 
877 	spdk_trace_record(TRACE_RDMA_QP_DESTROY, 0, 0, (uintptr_t)rqpair->cm_id, 0);
878 
879 	spdk_poller_unregister(&rqpair->destruct_poller);
880 
881 	if (rqpair->qd != 0) {
882 		if (rqpair->srq == NULL) {
883 			nvmf_rdma_dump_qpair_contents(rqpair);
884 		}
885 		SPDK_WARNLOG("Destroying qpair when queue depth is %d\n", rqpair->qd);
886 	}
887 
888 	if (rqpair->poller) {
889 		TAILQ_REMOVE(&rqpair->poller->qpairs, rqpair, link);
890 
891 		if (rqpair->srq != NULL && rqpair->resources != NULL) {
892 			/* Drop all received but unprocessed commands for this queue and return them to SRQ */
893 			STAILQ_FOREACH_SAFE(rdma_recv, &rqpair->resources->incoming_queue, link, recv_tmp) {
894 				if (rqpair == rdma_recv->qpair) {
895 					STAILQ_REMOVE_HEAD(&rqpair->resources->incoming_queue, link);
896 					rc = ibv_post_srq_recv(rqpair->srq, &rdma_recv->wr, &bad_recv_wr);
897 					if (rc) {
898 						SPDK_ERRLOG("Unable to re-post rx descriptor\n");
899 					}
900 				}
901 			}
902 		}
903 	}
904 
905 	if (rqpair->cm_id) {
906 		if (rqpair->cm_id->qp != NULL) {
907 			rdma_destroy_qp(rqpair->cm_id);
908 		}
909 		rdma_destroy_id(rqpair->cm_id);
910 
911 		if (rqpair->poller != NULL && rqpair->srq == NULL) {
912 			rqpair->poller->required_num_wr -= MAX_WR_PER_QP(rqpair->max_queue_depth);
913 		}
914 	}
915 
916 	if (rqpair->srq == NULL && rqpair->resources != NULL) {
917 		nvmf_rdma_resources_destroy(rqpair->resources);
918 	}
919 
920 	free(rqpair);
921 }
922 
923 static int
924 nvmf_rdma_resize_cq(struct spdk_nvmf_rdma_qpair *rqpair, struct spdk_nvmf_rdma_device *device)
925 {
926 	struct spdk_nvmf_rdma_poller	*rpoller;
927 	int				rc, num_cqe, required_num_wr;
928 
929 	/* Enlarge CQ size dynamically */
930 	rpoller = rqpair->poller;
931 	required_num_wr = rpoller->required_num_wr + MAX_WR_PER_QP(rqpair->max_queue_depth);
932 	num_cqe = rpoller->num_cqe;
933 	if (num_cqe < required_num_wr) {
934 		num_cqe = spdk_max(num_cqe * 2, required_num_wr);
935 		num_cqe = spdk_min(num_cqe, device->attr.max_cqe);
936 	}
937 
938 	if (rpoller->num_cqe != num_cqe) {
939 		if (required_num_wr > device->attr.max_cqe) {
940 			SPDK_ERRLOG("RDMA CQE requirement (%d) exceeds device max_cqe limitation (%d)\n",
941 				    required_num_wr, device->attr.max_cqe);
942 			return -1;
943 		}
944 
945 		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Resize RDMA CQ from %d to %d\n", rpoller->num_cqe, num_cqe);
946 		rc = ibv_resize_cq(rpoller->cq, num_cqe);
947 		if (rc) {
948 			SPDK_ERRLOG("RDMA CQ resize failed: errno %d: %s\n", errno, spdk_strerror(errno));
949 			return -1;
950 		}
951 
952 		rpoller->num_cqe = num_cqe;
953 	}
954 
955 	rpoller->required_num_wr = required_num_wr;
956 	return 0;
957 }
958 
959 static int
960 spdk_nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
961 {
962 	struct spdk_nvmf_rdma_qpair		*rqpair;
963 	int					rc;
964 	struct spdk_nvmf_rdma_transport		*rtransport;
965 	struct spdk_nvmf_transport		*transport;
966 	struct spdk_nvmf_rdma_resource_opts	opts;
967 	struct spdk_nvmf_rdma_device		*device;
968 	struct ibv_qp_init_attr			ibv_init_attr;
969 
970 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
971 	device = rqpair->port->device;
972 
973 	memset(&ibv_init_attr, 0, sizeof(struct ibv_qp_init_attr));
974 	ibv_init_attr.qp_context	= rqpair;
975 	ibv_init_attr.qp_type		= IBV_QPT_RC;
976 	ibv_init_attr.send_cq		= rqpair->poller->cq;
977 	ibv_init_attr.recv_cq		= rqpair->poller->cq;
978 
979 	if (rqpair->srq) {
980 		ibv_init_attr.srq		= rqpair->srq;
981 	} else {
982 		ibv_init_attr.cap.max_recv_wr	= rqpair->max_queue_depth +
983 						  1; /* RECV operations + dummy drain WR */
984 	}
985 
986 	ibv_init_attr.cap.max_send_wr	= rqpair->max_queue_depth *
987 					  2 + 1; /* SEND, READ, and WRITE operations + dummy drain WR */
988 	ibv_init_attr.cap.max_send_sge	= spdk_min(device->attr.max_sge, NVMF_DEFAULT_TX_SGE);
989 	ibv_init_attr.cap.max_recv_sge	= spdk_min(device->attr.max_sge, NVMF_DEFAULT_RX_SGE);
990 
991 	if (rqpair->srq == NULL && nvmf_rdma_resize_cq(rqpair, device) < 0) {
992 		SPDK_ERRLOG("Failed to resize the completion queue. Cannot initialize qpair.\n");
993 		goto error;
994 	}
995 
996 	rc = rdma_create_qp(rqpair->cm_id, rqpair->port->device->pd, &ibv_init_attr);
997 	if (rc) {
998 		SPDK_ERRLOG("rdma_create_qp failed: errno %d: %s\n", errno, spdk_strerror(errno));
999 		goto error;
1000 	}
1001 
1002 	rqpair->max_send_depth = spdk_min((uint32_t)(rqpair->max_queue_depth * 2 + 1),
1003 					  ibv_init_attr.cap.max_send_wr);
1004 	rqpair->max_send_sge = spdk_min(NVMF_DEFAULT_TX_SGE, ibv_init_attr.cap.max_send_sge);
1005 	rqpair->max_recv_sge = spdk_min(NVMF_DEFAULT_RX_SGE, ibv_init_attr.cap.max_recv_sge);
1006 	spdk_trace_record(TRACE_RDMA_QP_CREATE, 0, 0, (uintptr_t)rqpair->cm_id, 0);
1007 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "New RDMA Connection: %p\n", qpair);
1008 
1009 	rqpair->sends_to_post.first = NULL;
1010 	rqpair->sends_to_post.last = NULL;
1011 
1012 	if (rqpair->poller->srq == NULL) {
1013 		rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport);
1014 		transport = &rtransport->transport;
1015 
1016 		opts.qp = rqpair->cm_id->qp;
1017 		opts.pd = rqpair->cm_id->pd;
1018 		opts.qpair = rqpair;
1019 		opts.shared = false;
1020 		opts.max_queue_depth = rqpair->max_queue_depth;
1021 		opts.in_capsule_data_size = transport->opts.in_capsule_data_size;
1022 
1023 		rqpair->resources = nvmf_rdma_resources_create(&opts);
1024 
1025 		if (!rqpair->resources) {
1026 			SPDK_ERRLOG("Unable to allocate resources for receive queue.\n");
1027 			goto error;
1028 		}
1029 	} else {
1030 		rqpair->resources = rqpair->poller->resources;
1031 	}
1032 
1033 	rqpair->current_recv_depth = 0;
1034 	STAILQ_INIT(&rqpair->pending_rdma_read_queue);
1035 	STAILQ_INIT(&rqpair->pending_rdma_write_queue);
1036 
1037 	return 0;
1038 
1039 error:
1040 	rdma_destroy_id(rqpair->cm_id);
1041 	rqpair->cm_id = NULL;
1042 	spdk_nvmf_rdma_qpair_destroy(rqpair);
1043 	return -1;
1044 }
1045 
1046 /* Append the given recv wr structure to the resource structs outstanding recvs list. */
1047 /* This function accepts either a single wr or the first wr in a linked list. */
1048 static void
1049 nvmf_rdma_qpair_queue_recv_wrs(struct spdk_nvmf_rdma_qpair *rqpair, struct ibv_recv_wr *first)
1050 {
1051 	struct ibv_recv_wr *last;
1052 
1053 	last = first;
1054 	while (last->next != NULL) {
1055 		last = last->next;
1056 	}
1057 
1058 	if (rqpair->resources->recvs_to_post.first == NULL) {
1059 		rqpair->resources->recvs_to_post.first = first;
1060 		rqpair->resources->recvs_to_post.last = last;
1061 		if (rqpair->srq == NULL) {
1062 			STAILQ_INSERT_TAIL(&rqpair->poller->qpairs_pending_recv, rqpair, recv_link);
1063 		}
1064 	} else {
1065 		rqpair->resources->recvs_to_post.last->next = first;
1066 		rqpair->resources->recvs_to_post.last = last;
1067 	}
1068 }
1069 
1070 /* Append the given send wr structure to the qpair's outstanding sends list. */
1071 /* This function accepts either a single wr or the first wr in a linked list. */
1072 static void
1073 nvmf_rdma_qpair_queue_send_wrs(struct spdk_nvmf_rdma_qpair *rqpair, struct ibv_send_wr *first)
1074 {
1075 	struct ibv_send_wr *last;
1076 
1077 	last = first;
1078 	while (last->next != NULL) {
1079 		last = last->next;
1080 	}
1081 
1082 	if (rqpair->sends_to_post.first == NULL) {
1083 		rqpair->sends_to_post.first = first;
1084 		rqpair->sends_to_post.last = last;
1085 		STAILQ_INSERT_TAIL(&rqpair->poller->qpairs_pending_send, rqpair, send_link);
1086 	} else {
1087 		rqpair->sends_to_post.last->next = first;
1088 		rqpair->sends_to_post.last = last;
1089 	}
1090 }
1091 
1092 static int
1093 request_transfer_in(struct spdk_nvmf_request *req)
1094 {
1095 	struct spdk_nvmf_rdma_request	*rdma_req;
1096 	struct spdk_nvmf_qpair		*qpair;
1097 	struct spdk_nvmf_rdma_qpair	*rqpair;
1098 
1099 	qpair = req->qpair;
1100 	rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
1101 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
1102 
1103 	assert(req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
1104 	assert(rdma_req != NULL);
1105 
1106 	nvmf_rdma_qpair_queue_send_wrs(rqpair, &rdma_req->data.wr);
1107 	rqpair->current_read_depth += rdma_req->num_outstanding_data_wr;
1108 	rqpair->current_send_depth += rdma_req->num_outstanding_data_wr;
1109 	return 0;
1110 }
1111 
1112 static int
1113 request_transfer_out(struct spdk_nvmf_request *req, int *data_posted)
1114 {
1115 	int				num_outstanding_data_wr = 0;
1116 	struct spdk_nvmf_rdma_request	*rdma_req;
1117 	struct spdk_nvmf_qpair		*qpair;
1118 	struct spdk_nvmf_rdma_qpair	*rqpair;
1119 	struct spdk_nvme_cpl		*rsp;
1120 	struct ibv_send_wr		*first = NULL;
1121 
1122 	*data_posted = 0;
1123 	qpair = req->qpair;
1124 	rsp = &req->rsp->nvme_cpl;
1125 	rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
1126 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
1127 
1128 	/* Advance our sq_head pointer */
1129 	if (qpair->sq_head == qpair->sq_head_max) {
1130 		qpair->sq_head = 0;
1131 	} else {
1132 		qpair->sq_head++;
1133 	}
1134 	rsp->sqhd = qpair->sq_head;
1135 
1136 	/* queue the capsule for the recv buffer */
1137 	assert(rdma_req->recv != NULL);
1138 
1139 	nvmf_rdma_qpair_queue_recv_wrs(rqpair, &rdma_req->recv->wr);
1140 
1141 	rdma_req->recv = NULL;
1142 	assert(rqpair->current_recv_depth > 0);
1143 	rqpair->current_recv_depth--;
1144 
1145 	/* Build the response which consists of optional
1146 	 * RDMA WRITEs to transfer data, plus an RDMA SEND
1147 	 * containing the response.
1148 	 */
1149 	first = &rdma_req->rsp.wr;
1150 
1151 	if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
1152 	    req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1153 		first = &rdma_req->data.wr;
1154 		*data_posted = 1;
1155 		num_outstanding_data_wr = rdma_req->num_outstanding_data_wr;
1156 	}
1157 	nvmf_rdma_qpair_queue_send_wrs(rqpair, first);
1158 	/* +1 for the rsp wr */
1159 	rqpair->current_send_depth += num_outstanding_data_wr + 1;
1160 
1161 	return 0;
1162 }
1163 
1164 static int
1165 spdk_nvmf_rdma_event_accept(struct rdma_cm_id *id, struct spdk_nvmf_rdma_qpair *rqpair)
1166 {
1167 	struct spdk_nvmf_rdma_accept_private_data	accept_data;
1168 	struct rdma_conn_param				ctrlr_event_data = {};
1169 	int						rc;
1170 
1171 	accept_data.recfmt = 0;
1172 	accept_data.crqsize = rqpair->max_queue_depth;
1173 
1174 	ctrlr_event_data.private_data = &accept_data;
1175 	ctrlr_event_data.private_data_len = sizeof(accept_data);
1176 	if (id->ps == RDMA_PS_TCP) {
1177 		ctrlr_event_data.responder_resources = 0; /* We accept 0 reads from the host */
1178 		ctrlr_event_data.initiator_depth = rqpair->max_read_depth;
1179 	}
1180 
1181 	/* Configure infinite retries for the initiator side qpair.
1182 	 * When using a shared receive queue on the target side,
1183 	 * we need to pass this value to the initiator to prevent the
1184 	 * initiator side NIC from completing SEND requests back to the
1185 	 * initiator with status rnr_retry_count_exceeded. */
1186 	if (rqpair->srq != NULL) {
1187 		ctrlr_event_data.rnr_retry_count = 0x7;
1188 	}
1189 
1190 	rc = rdma_accept(id, &ctrlr_event_data);
1191 	if (rc) {
1192 		SPDK_ERRLOG("Error %d on rdma_accept\n", errno);
1193 	} else {
1194 		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Sent back the accept\n");
1195 	}
1196 
1197 	return rc;
1198 }
1199 
1200 static void
1201 spdk_nvmf_rdma_event_reject(struct rdma_cm_id *id, enum spdk_nvmf_rdma_transport_error error)
1202 {
1203 	struct spdk_nvmf_rdma_reject_private_data	rej_data;
1204 
1205 	rej_data.recfmt = 0;
1206 	rej_data.sts = error;
1207 
1208 	rdma_reject(id, &rej_data, sizeof(rej_data));
1209 }
1210 
1211 static int
1212 nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *event,
1213 		  new_qpair_fn cb_fn)
1214 {
1215 	struct spdk_nvmf_rdma_transport *rtransport;
1216 	struct spdk_nvmf_rdma_qpair	*rqpair = NULL;
1217 	struct spdk_nvmf_rdma_port	*port;
1218 	struct rdma_conn_param		*rdma_param = NULL;
1219 	const struct spdk_nvmf_rdma_request_private_data *private_data = NULL;
1220 	uint16_t			max_queue_depth;
1221 	uint16_t			max_read_depth;
1222 
1223 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
1224 
1225 	assert(event->id != NULL); /* Impossible. Can't even reject the connection. */
1226 	assert(event->id->verbs != NULL); /* Impossible. No way to handle this. */
1227 
1228 	rdma_param = &event->param.conn;
1229 	if (rdma_param->private_data == NULL ||
1230 	    rdma_param->private_data_len < sizeof(struct spdk_nvmf_rdma_request_private_data)) {
1231 		SPDK_ERRLOG("connect request: no private data provided\n");
1232 		spdk_nvmf_rdma_event_reject(event->id, SPDK_NVMF_RDMA_ERROR_INVALID_PRIVATE_DATA_LENGTH);
1233 		return -1;
1234 	}
1235 
1236 	private_data = rdma_param->private_data;
1237 	if (private_data->recfmt != 0) {
1238 		SPDK_ERRLOG("Received RDMA private data with RECFMT != 0\n");
1239 		spdk_nvmf_rdma_event_reject(event->id, SPDK_NVMF_RDMA_ERROR_INVALID_RECFMT);
1240 		return -1;
1241 	}
1242 
1243 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Connect Recv on fabric intf name %s, dev_name %s\n",
1244 		      event->id->verbs->device->name, event->id->verbs->device->dev_name);
1245 
1246 	port = event->listen_id->context;
1247 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Listen Id was %p with verbs %p. ListenAddr: %p\n",
1248 		      event->listen_id, event->listen_id->verbs, port);
1249 
1250 	/* Figure out the supported queue depth. This is a multi-step process
1251 	 * that takes into account hardware maximums, host provided values,
1252 	 * and our target's internal memory limits */
1253 
1254 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Calculating Queue Depth\n");
1255 
1256 	/* Start with the maximum queue depth allowed by the target */
1257 	max_queue_depth = rtransport->transport.opts.max_queue_depth;
1258 	max_read_depth = rtransport->transport.opts.max_queue_depth;
1259 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Target Max Queue Depth: %d\n",
1260 		      rtransport->transport.opts.max_queue_depth);
1261 
1262 	/* Next check the local NIC's hardware limitations */
1263 	SPDK_DEBUGLOG(SPDK_LOG_RDMA,
1264 		      "Local NIC Max Send/Recv Queue Depth: %d Max Read/Write Queue Depth: %d\n",
1265 		      port->device->attr.max_qp_wr, port->device->attr.max_qp_rd_atom);
1266 	max_queue_depth = spdk_min(max_queue_depth, port->device->attr.max_qp_wr);
1267 	max_read_depth = spdk_min(max_read_depth, port->device->attr.max_qp_init_rd_atom);
1268 
1269 	/* Next check the remote NIC's hardware limitations */
1270 	SPDK_DEBUGLOG(SPDK_LOG_RDMA,
1271 		      "Host (Initiator) NIC Max Incoming RDMA R/W operations: %d Max Outgoing RDMA R/W operations: %d\n",
1272 		      rdma_param->initiator_depth, rdma_param->responder_resources);
1273 	if (rdma_param->initiator_depth > 0) {
1274 		max_read_depth = spdk_min(max_read_depth, rdma_param->initiator_depth);
1275 	}
1276 
1277 	/* Finally check for the host software requested values, which are
1278 	 * optional. */
1279 	if (rdma_param->private_data != NULL &&
1280 	    rdma_param->private_data_len >= sizeof(struct spdk_nvmf_rdma_request_private_data)) {
1281 		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Host Receive Queue Size: %d\n", private_data->hrqsize);
1282 		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Host Send Queue Size: %d\n", private_data->hsqsize);
1283 		max_queue_depth = spdk_min(max_queue_depth, private_data->hrqsize);
1284 		max_queue_depth = spdk_min(max_queue_depth, private_data->hsqsize + 1);
1285 	}
1286 
1287 	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Final Negotiated Queue Depth: %d R/W Depth: %d\n",
1288 		      max_queue_depth, max_read_depth);
1289 
1290 	rqpair = calloc(1, sizeof(struct spdk_nvmf_rdma_qpair));
1291 	if (rqpair == NULL) {
1292 		SPDK_ERRLOG("Could not allocate new connection.\n");
1293 		spdk_nvmf_rdma_event_reject(event->id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES);
1294 		return -1;
1295 	}
1296 
1297 	rqpair->port = port;
1298 	rqpair->max_queue_depth = max_queue_depth;
1299 	rqpair->max_read_depth = max_read_depth;
1300 	rqpair->cm_id = event->id;
1301 	rqpair->listen_id = event->listen_id;
1302 	rqpair->qpair.transport = transport;
1303 
1304 	event->id->context = &rqpair->qpair;
1305 
1306 	cb_fn(&rqpair->qpair);
1307 
1308 	return 0;
1309 }
1310 
1311 static int
1312 spdk_nvmf_rdma_mem_notify(void *cb_ctx, struct spdk_mem_map *map,
1313 			  enum spdk_mem_map_notify_action action,
1314 			  void *vaddr, size_t size)
1315 {
1316 	struct ibv_pd *pd = cb_ctx;
1317 	struct ibv_mr *mr;
1318 
1319 	switch (action) {
1320 	case SPDK_MEM_MAP_NOTIFY_REGISTER:
1321 		if (!g_nvmf_hooks.get_rkey) {
1322 			mr = ibv_reg_mr(pd, vaddr, size,
1323 					IBV_ACCESS_LOCAL_WRITE |
1324 					IBV_ACCESS_REMOTE_READ |
1325 					IBV_ACCESS_REMOTE_WRITE);
1326 			if (mr == NULL) {
1327 				SPDK_ERRLOG("ibv_reg_mr() failed\n");
1328 				return -1;
1329 			} else {
1330 				spdk_mem_map_set_translation(map, (uint64_t)vaddr, size, (uint64_t)mr);
1331 			}
1332 		} else {
1333 			spdk_mem_map_set_translation(map, (uint64_t)vaddr, size,
1334 						     g_nvmf_hooks.get_rkey(pd, vaddr, size));
1335 		}
1336 		break;
1337 	case SPDK_MEM_MAP_NOTIFY_UNREGISTER:
1338 		if (!g_nvmf_hooks.get_rkey) {
1339 			mr = (struct ibv_mr *)spdk_mem_map_translate(map, (uint64_t)vaddr, NULL);
1340 			spdk_mem_map_clear_translation(map, (uint64_t)vaddr, size);
1341 			if (mr) {
1342 				ibv_dereg_mr(mr);
1343 			}
1344 		}
1345 		break;
1346 	}
1347 
1348 	return 0;
1349 }
1350 
1351 static int
1352 spdk_nvmf_rdma_check_contiguous_entries(uint64_t addr_1, uint64_t addr_2)
1353 {
1354 	/* Two contiguous mappings will point to the same address which is the start of the RDMA MR. */
1355 	return addr_1 == addr_2;
1356 }
1357 
1358 static void
1359 spdk_nvmf_rdma_request_free_buffers(struct spdk_nvmf_rdma_request *rdma_req,
1360 				    struct spdk_nvmf_transport_poll_group *group, struct spdk_nvmf_transport *transport,
1361 				    uint32_t num_buffers)
1362 {
1363 	uint32_t i;
1364 
1365 	for (i = 0; i < num_buffers; i++) {
1366 		if (group->buf_cache_count < group->buf_cache_size) {
1367 			STAILQ_INSERT_HEAD(&group->buf_cache,
1368 					   (struct spdk_nvmf_transport_pg_cache_buf *)rdma_req->buffers[i], link);
1369 			group->buf_cache_count++;
1370 		} else {
1371 			spdk_mempool_put(transport->data_buf_pool, rdma_req->buffers[i]);
1372 		}
1373 		rdma_req->req.iov[i].iov_base = NULL;
1374 		rdma_req->buffers[i] = NULL;
1375 		rdma_req->req.iov[i].iov_len = 0;
1376 
1377 	}
1378 	rdma_req->data_from_pool = false;
1379 }
1380 
1381 static int
1382 nvmf_rdma_request_get_buffers(struct spdk_nvmf_rdma_request *rdma_req,
1383 			      struct spdk_nvmf_transport_poll_group *group, struct spdk_nvmf_transport *transport,
1384 			      uint32_t num_buffers)
1385 {
1386 	uint32_t i = 0;
1387 
1388 	while (i < num_buffers) {
1389 		if (!(STAILQ_EMPTY(&group->buf_cache))) {
1390 			group->buf_cache_count--;
1391 			rdma_req->buffers[i] = STAILQ_FIRST(&group->buf_cache);
1392 			STAILQ_REMOVE_HEAD(&group->buf_cache, link);
1393 			assert(rdma_req->buffers[i] != NULL);
1394 			i++;
1395 		} else {
1396 			if (spdk_mempool_get_bulk(transport->data_buf_pool, &rdma_req->buffers[i], num_buffers - i)) {
1397 				goto err_exit;
1398 			}
1399 			i += num_buffers - i;
1400 		}
1401 	}
1402 
1403 	return 0;
1404 
1405 err_exit:
1406 	spdk_nvmf_rdma_request_free_buffers(rdma_req, group, transport, i);
1407 	return -ENOMEM;
1408 }
1409 
1410 typedef enum spdk_nvme_data_transfer spdk_nvme_data_transfer_t;
1411 
1412 static spdk_nvme_data_transfer_t
1413 spdk_nvmf_rdma_request_get_xfer(struct spdk_nvmf_rdma_request *rdma_req)
1414 {
1415 	enum spdk_nvme_data_transfer xfer;
1416 	struct spdk_nvme_cmd *cmd = &rdma_req->req.cmd->nvme_cmd;
1417 	struct spdk_nvme_sgl_descriptor *sgl = &cmd->dptr.sgl1;
1418 
1419 #ifdef SPDK_CONFIG_RDMA_SEND_WITH_INVAL
1420 	rdma_req->rsp.wr.opcode = IBV_WR_SEND;
1421 	rdma_req->rsp.wr.imm_data = 0;
1422 #endif
1423 
1424 	/* Figure out data transfer direction */
1425 	if (cmd->opc == SPDK_NVME_OPC_FABRIC) {
1426 		xfer = spdk_nvme_opc_get_data_transfer(rdma_req->req.cmd->nvmf_cmd.fctype);
1427 	} else {
1428 		xfer = spdk_nvme_opc_get_data_transfer(cmd->opc);
1429 
1430 		/* Some admin commands are special cases */
1431 		if ((rdma_req->req.qpair->qid == 0) &&
1432 		    ((cmd->opc == SPDK_NVME_OPC_GET_FEATURES) ||
1433 		     (cmd->opc == SPDK_NVME_OPC_SET_FEATURES))) {
1434 			switch (cmd->cdw10 & 0xff) {
1435 			case SPDK_NVME_FEAT_LBA_RANGE_TYPE:
1436 			case SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION:
1437 			case SPDK_NVME_FEAT_HOST_IDENTIFIER:
1438 				break;
1439 			default:
1440 				xfer = SPDK_NVME_DATA_NONE;
1441 			}
1442 		}
1443 	}
1444 
1445 	if (xfer == SPDK_NVME_DATA_NONE) {
1446 		return xfer;
1447 	}
1448 
1449 	/* Even for commands that may transfer data, they could have specified 0 length.
1450 	 * We want those to show up with xfer SPDK_NVME_DATA_NONE.
1451 	 */
1452 	switch (sgl->generic.type) {
1453 	case SPDK_NVME_SGL_TYPE_DATA_BLOCK:
1454 	case SPDK_NVME_SGL_TYPE_BIT_BUCKET:
1455 	case SPDK_NVME_SGL_TYPE_SEGMENT:
1456 	case SPDK_NVME_SGL_TYPE_LAST_SEGMENT:
1457 	case SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK:
1458 		if (sgl->unkeyed.length == 0) {
1459 			xfer = SPDK_NVME_DATA_NONE;
1460 		}
1461 		break;
1462 	case SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK:
1463 		if (sgl->keyed.length == 0) {
1464 			xfer = SPDK_NVME_DATA_NONE;
1465 		}
1466 		break;
1467 	}
1468 
1469 	return xfer;
1470 }
1471 
1472 static int
1473 nvmf_request_alloc_wrs(struct spdk_nvmf_rdma_transport *rtransport,
1474 		       struct spdk_nvmf_rdma_request *rdma_req,
1475 		       uint32_t num_sgl_descriptors)
1476 {
1477 	struct spdk_nvmf_rdma_request_data	*work_requests[SPDK_NVMF_MAX_SGL_ENTRIES];
1478 	struct spdk_nvmf_rdma_request_data	*current_data_wr;
1479 	uint32_t				i;
1480 
1481 	if (spdk_mempool_get_bulk(rtransport->data_wr_pool, (void **)work_requests, num_sgl_descriptors)) {
1482 		return -ENOMEM;
1483 	}
1484 
1485 	current_data_wr = &rdma_req->data;
1486 
1487 	for (i = 0; i < num_sgl_descriptors; i++) {
1488 		if (rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1489 			current_data_wr->wr.opcode = IBV_WR_RDMA_WRITE;
1490 		} else if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
1491 			current_data_wr->wr.opcode = IBV_WR_RDMA_READ;
1492 		} else {
1493 			assert(false);
1494 		}
1495 		work_requests[i]->wr.send_flags = IBV_SEND_SIGNALED;
1496 		work_requests[i]->wr.sg_list = work_requests[i]->sgl;
1497 		work_requests[i]->wr.wr_id = rdma_req->data.wr.wr_id;
1498 		current_data_wr->wr.next = &work_requests[i]->wr;
1499 		current_data_wr = work_requests[i];
1500 	}
1501 
1502 	if (rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1503 		current_data_wr->wr.opcode = IBV_WR_RDMA_WRITE;
1504 		current_data_wr->wr.next = &rdma_req->rsp.wr;
1505 	} else if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
1506 		current_data_wr->wr.opcode = IBV_WR_RDMA_READ;
1507 		current_data_wr->wr.next = NULL;
1508 	}
1509 	return 0;
1510 }
1511 
1512 static int
1513 nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,
1514 		       struct spdk_nvmf_rdma_poll_group *rgroup,
1515 		       struct spdk_nvmf_rdma_device *device,
1516 		       struct spdk_nvmf_rdma_request *rdma_req,
1517 		       struct ibv_send_wr *wr,
1518 		       uint32_t length)
1519 {
1520 	uint64_t	translation_len;
1521 	uint32_t	remaining_length = length;
1522 	uint32_t	iovcnt;
1523 	uint32_t	i = 0;
1524 
1525 
1526 	while (remaining_length) {
1527 		iovcnt = rdma_req->req.iovcnt;
1528 		rdma_req->req.iov[iovcnt].iov_base = (void *)((uintptr_t)(rdma_req->buffers[iovcnt] +
1529 						     NVMF_DATA_BUFFER_MASK) &
1530 						     ~NVMF_DATA_BUFFER_MASK);
1531 		rdma_req->req.iov[iovcnt].iov_len  = spdk_min(remaining_length,
1532 						     rtransport->transport.opts.io_unit_size);
1533 		rdma_req->req.iovcnt++;
1534 		wr->sg_list[i].addr = (uintptr_t)(rdma_req->req.iov[iovcnt].iov_base);
1535 		wr->sg_list[i].length = rdma_req->req.iov[iovcnt].iov_len;
1536 		translation_len = rdma_req->req.iov[iovcnt].iov_len;
1537 
1538 		if (!g_nvmf_hooks.get_rkey) {
1539 			wr->sg_list[i].lkey = ((struct ibv_mr *)spdk_mem_map_translate(device->map,
1540 					       (uint64_t)rdma_req->buffers[iovcnt], &translation_len))->lkey;
1541 		} else {
1542 			wr->sg_list[i].lkey = spdk_mem_map_translate(device->map,
1543 					      (uint64_t)rdma_req->buffers[iovcnt], &translation_len);
1544 		}
1545 
1546 		remaining_length -= rdma_req->req.iov[iovcnt].iov_len;
1547 
1548 		if (translation_len < rdma_req->req.iov[iovcnt].iov_len) {
1549 			SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions\n");
1550 			return -EINVAL;
1551 		}
1552 		i++;
1553 	}
1554 	wr->num_sge = i;
1555 
1556 	return 0;
1557 }
1558 
1559 static int
1560 spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
1561 				 struct spdk_nvmf_rdma_device *device,
1562 				 struct spdk_nvmf_rdma_request *rdma_req)
1563 {
1564 	struct spdk_nvmf_rdma_qpair		*rqpair;
1565 	struct spdk_nvmf_rdma_poll_group	*rgroup;
1566 	uint32_t				num_buffers;
1567 	uint32_t				i = 0;
1568 	int					rc = 0;
1569 
1570 	rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
1571 	rgroup = rqpair->poller->group;
1572 	rdma_req->req.iovcnt = 0;
1573 
1574 	num_buffers = rdma_req->req.length / rtransport->transport.opts.io_unit_size;
1575 	if (rdma_req->req.length % rtransport->transport.opts.io_unit_size) {
1576 		num_buffers++;
1577 	}
1578 
1579 	if (nvmf_rdma_request_get_buffers(rdma_req, &rgroup->group, &rtransport->transport, num_buffers)) {
1580 		return -ENOMEM;
1581 	}
1582 
1583 	rdma_req->req.iovcnt = 0;
1584 
1585 	rc = nvmf_rdma_fill_buffers(rtransport, rgroup, device, rdma_req, &rdma_req->data.wr,
1586 				    rdma_req->req.length);
1587 	if (rc != 0) {
1588 		goto err_exit;
1589 	}
1590 
1591 	assert(rdma_req->req.iovcnt <= rqpair->max_send_sge);
1592 
1593 	rdma_req->data_from_pool = true;
1594 
1595 	return rc;
1596 
1597 err_exit:
1598 	spdk_nvmf_rdma_request_free_buffers(rdma_req, &rgroup->group, &rtransport->transport, num_buffers);
1599 	while (i) {
1600 		i--;
1601 		rdma_req->data.wr.sg_list[i].addr = 0;
1602 		rdma_req->data.wr.sg_list[i].length = 0;
1603 		rdma_req->data.wr.sg_list[i].lkey = 0;
1604 	}
1605 	rdma_req->req.iovcnt = 0;
1606 	return rc;
1607 }
1608 
1609 static int
1610 nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transport *rtransport,
1611 				      struct spdk_nvmf_rdma_device *device,
1612 				      struct spdk_nvmf_rdma_request *rdma_req)
1613 {
1614 	struct spdk_nvmf_rdma_qpair		*rqpair;
1615 	struct spdk_nvmf_rdma_poll_group	*rgroup;
1616 	struct ibv_send_wr			*current_wr;
1617 	struct spdk_nvmf_request		*req = &rdma_req->req;
1618 	struct spdk_nvme_sgl_descriptor		*inline_segment, *desc;
1619 	uint32_t				num_sgl_descriptors;
1620 	uint32_t				num_buffers = 0;
1621 	uint32_t				i;
1622 	int					rc;
1623 
1624 	rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
1625 	rgroup = rqpair->poller->group;
1626 
1627 	inline_segment = &req->cmd->nvme_cmd.dptr.sgl1;
1628 	assert(inline_segment->generic.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
1629 	assert(inline_segment->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
1630 
1631 	num_sgl_descriptors = inline_segment->unkeyed.length / sizeof(struct spdk_nvme_sgl_descriptor);
1632 	assert(num_sgl_descriptors <= SPDK_NVMF_MAX_SGL_ENTRIES);
1633 	desc = (struct spdk_nvme_sgl_descriptor *)rdma_req->recv->buf + inline_segment->address;
1634 
1635 	for (i = 0; i < num_sgl_descriptors; i++) {
1636 		num_buffers += desc->keyed.length / rtransport->transport.opts.io_unit_size;
1637 		if (desc->keyed.length % rtransport->transport.opts.io_unit_size) {
1638 			num_buffers++;
1639 		}
1640 		desc++;
1641 	}
1642 	/* If the number of buffers is too large, then we know the I/O is larger than allowed. Fail it. */
1643 	if (num_buffers > NVMF_REQ_MAX_BUFFERS) {
1644 		return -EINVAL;
1645 	}
1646 	if (nvmf_rdma_request_get_buffers(rdma_req, &rgroup->group, &rtransport->transport,
1647 					  num_buffers) != 0) {
1648 		return -ENOMEM;
1649 	}
1650 
1651 	if (nvmf_request_alloc_wrs(rtransport, rdma_req, num_sgl_descriptors - 1) != 0) {
1652 		spdk_nvmf_rdma_request_free_buffers(rdma_req, &rgroup->group, &rtransport->transport, num_buffers);
1653 		return -ENOMEM;
1654 	}
1655 
1656 	/* The first WR must always be the embedded data WR. This is how we unwind them later. */
1657 	current_wr = &rdma_req->data.wr;
1658 
1659 	req->iovcnt = 0;
1660 	desc = (struct spdk_nvme_sgl_descriptor *)rdma_req->recv->buf + inline_segment->address;
1661 	for (i = 0; i < num_sgl_descriptors; i++) {
1662 		/* The descriptors must be keyed data block descriptors with an address, not an offset. */
1663 		if (spdk_unlikely(desc->generic.type != SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK ||
1664 				  desc->keyed.subtype != SPDK_NVME_SGL_SUBTYPE_ADDRESS)) {
1665 			rc = -EINVAL;
1666 			goto err_exit;
1667 		}
1668 
1669 		current_wr->num_sge = 0;
1670 		req->length += desc->keyed.length;
1671 
1672 		rc = nvmf_rdma_fill_buffers(rtransport, rgroup, device, rdma_req, current_wr,
1673 					    desc->keyed.length);
1674 		if (rc != 0) {
1675 			rc = -ENOMEM;
1676 			goto err_exit;
1677 		}
1678 
1679 		current_wr->wr.rdma.rkey = desc->keyed.key;
1680 		current_wr->wr.rdma.remote_addr = desc->address;
1681 		current_wr = current_wr->next;
1682 		desc++;
1683 	}
1684 
1685 #ifdef SPDK_CONFIG_RDMA_SEND_WITH_INVAL
1686 	/* Go back to the last descriptor in the list. */
1687 	desc--;
1688 	if ((device->attr.device_cap_flags & IBV_DEVICE_MEM_MGT_EXTENSIONS) != 0) {
1689 		if (desc->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY) {
1690 			rdma_req->rsp.wr.opcode = IBV_WR_SEND_WITH_INV;
1691 			rdma_req->rsp.wr.imm_data = desc->keyed.key;
1692 		}
1693 	}
1694 #endif
1695 
1696 	rdma_req->num_outstanding_data_wr = num_sgl_descriptors;
1697 	rdma_req->data_from_pool = true;
1698 
1699 	return 0;
1700 
1701 err_exit:
1702 	spdk_nvmf_rdma_request_free_buffers(rdma_req, &rgroup->group, &rtransport->transport, num_buffers);
1703 	nvmf_rdma_request_free_data(rdma_req, rtransport);
1704 	return rc;
1705 }
1706 
1707 static int
1708 spdk_nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport,
1709 				 struct spdk_nvmf_rdma_device *device,
1710 				 struct spdk_nvmf_rdma_request *rdma_req)
1711 {
1712 	struct spdk_nvme_cmd			*cmd;
1713 	struct spdk_nvme_cpl			*rsp;
1714 	struct spdk_nvme_sgl_descriptor		*sgl;
1715 	int					rc;
1716 
1717 	cmd = &rdma_req->req.cmd->nvme_cmd;
1718 	rsp = &rdma_req->req.rsp->nvme_cpl;
1719 	sgl = &cmd->dptr.sgl1;
1720 
1721 	if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK &&
1722 	    (sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS ||
1723 	     sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY)) {
1724 		if (sgl->keyed.length > rtransport->transport.opts.max_io_size) {
1725 			SPDK_ERRLOG("SGL length 0x%x exceeds max io size 0x%x\n",
1726 				    sgl->keyed.length, rtransport->transport.opts.max_io_size);
1727 			rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
1728 			return -1;
1729 		}
1730 #ifdef SPDK_CONFIG_RDMA_SEND_WITH_INVAL
1731 		if ((device->attr.device_cap_flags & IBV_DEVICE_MEM_MGT_EXTENSIONS) != 0) {
1732 			if (sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY) {
1733 				rdma_req->rsp.wr.opcode = IBV_WR_SEND_WITH_INV;
1734 				rdma_req->rsp.wr.imm_data = sgl->keyed.key;
1735 			}
1736 		}
1737 #endif
1738 
1739 		/* fill request length and populate iovs */
1740 		rdma_req->req.length = sgl->keyed.length;
1741 
1742 		if (spdk_nvmf_rdma_request_fill_iovs(rtransport, device, rdma_req) < 0) {
1743 			/* No available buffers. Queue this request up. */
1744 			SPDK_DEBUGLOG(SPDK_LOG_RDMA, "No available large data buffers. Queueing request %p\n", rdma_req);
1745 			return 0;
1746 		}
1747 
1748 		/* backward compatible */
1749 		rdma_req->req.data = rdma_req->req.iov[0].iov_base;
1750 
1751 		/* rdma wr specifics */
1752 		rdma_req->data.wr.num_sge = rdma_req->req.iovcnt;
1753 		rdma_req->data.wr.wr.rdma.rkey = sgl->keyed.key;
1754 		rdma_req->data.wr.wr.rdma.remote_addr = sgl->address;
1755 		if (rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1756 			rdma_req->data.wr.opcode = IBV_WR_RDMA_WRITE;
1757 			rdma_req->data.wr.next = &rdma_req->rsp.wr;
1758 		} else if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
1759 			rdma_req->data.wr.opcode = IBV_WR_RDMA_READ;
1760 			rdma_req->data.wr.next = NULL;
1761 		}
1762 
1763 		/* set the number of outstanding data WRs for this request. */
1764 		rdma_req->num_outstanding_data_wr = 1;
1765 
1766 		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Request %p took %d buffer/s from central pool\n", rdma_req,
1767 			      rdma_req->req.iovcnt);
1768 
1769 		return 0;
1770 	} else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK &&
1771 		   sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET) {
1772 		uint64_t offset = sgl->address;
1773 		uint32_t max_len = rtransport->transport.opts.in_capsule_data_size;
1774 
1775 		SPDK_DEBUGLOG(SPDK_LOG_NVMF, "In-capsule data: offset 0x%" PRIx64 ", length 0x%x\n",
1776 			      offset, sgl->unkeyed.length);
1777 
1778 		if (offset > max_len) {
1779 			SPDK_ERRLOG("In-capsule offset 0x%" PRIx64 " exceeds capsule length 0x%x\n",
1780 				    offset, max_len);
1781 			rsp->status.sc = SPDK_NVME_SC_INVALID_SGL_OFFSET;
1782 			return -1;
1783 		}
1784 		max_len -= (uint32_t)offset;
1785 
1786 		if (sgl->unkeyed.length > max_len) {
1787 			SPDK_ERRLOG("In-capsule data length 0x%x exceeds capsule length 0x%x\n",
1788 				    sgl->unkeyed.length, max_len);
1789 			rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
1790 			return -1;
1791 		}
1792 
1793 		rdma_req->num_outstanding_data_wr = 0;
1794 		rdma_req->req.data = rdma_req->recv->buf + offset;
1795 		rdma_req->data_from_pool = false;
1796 		rdma_req->req.length = sgl->unkeyed.length;
1797 
1798 		rdma_req->req.iov[0].iov_base = rdma_req->req.data;
1799 		rdma_req->req.iov[0].iov_len = rdma_req->req.length;
1800 		rdma_req->req.iovcnt = 1;
1801 
1802 		return 0;
1803 	} else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT &&
1804 		   sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET) {
1805 
1806 		rc = nvmf_rdma_request_fill_iovs_multi_sgl(rtransport, device, rdma_req);
1807 		if (rc == -ENOMEM) {
1808 			SPDK_DEBUGLOG(SPDK_LOG_RDMA, "No available large data buffers. Queueing request %p\n", rdma_req);
1809 			return 0;
1810 		} else if (rc == -EINVAL) {
1811 			SPDK_ERRLOG("Multi SGL element request length exceeds the max I/O size\n");
1812 			return -1;
1813 		}
1814 
1815 		/* backward compatible */
1816 		rdma_req->req.data = rdma_req->req.iov[0].iov_base;
1817 
1818 		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Request %p took %d buffer/s from central pool\n", rdma_req,
1819 			      rdma_req->req.iovcnt);
1820 
1821 		return 0;
1822 	}
1823 
1824 	SPDK_ERRLOG("Invalid NVMf I/O Command SGL:  Type 0x%x, Subtype 0x%x\n",
1825 		    sgl->generic.type, sgl->generic.subtype);
1826 	rsp->status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID;
1827 	return -1;
1828 }
1829 
1830 static void
1831 nvmf_rdma_request_free(struct spdk_nvmf_rdma_request *rdma_req,
1832 		       struct spdk_nvmf_rdma_transport	*rtransport)
1833 {
1834 	struct spdk_nvmf_rdma_qpair		*rqpair;
1835 	struct spdk_nvmf_rdma_poll_group	*rgroup;
1836 
1837 	rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
1838 	if (rdma_req->data_from_pool) {
1839 		rgroup = rqpair->poller->group;
1840 
1841 		spdk_nvmf_rdma_request_free_buffers(rdma_req, &rgroup->group, &rtransport->transport,
1842 						    rdma_req->req.iovcnt);
1843 	}
1844 	nvmf_rdma_request_free_data(rdma_req, rtransport);
1845 	rdma_req->req.length = 0;
1846 	rdma_req->req.iovcnt = 0;
1847 	rdma_req->req.data = NULL;
1848 	rdma_req->rsp.wr.next = NULL;
1849 	rdma_req->data.wr.next = NULL;
1850 	rqpair->qd--;
1851 
1852 	STAILQ_INSERT_HEAD(&rqpair->resources->free_queue, rdma_req, state_link);
1853 	rdma_req->state = RDMA_REQUEST_STATE_FREE;
1854 }
1855 
1856 static bool
1857 spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
1858 			       struct spdk_nvmf_rdma_request *rdma_req)
1859 {
1860 	struct spdk_nvmf_rdma_qpair	*rqpair;
1861 	struct spdk_nvmf_rdma_device	*device;
1862 	struct spdk_nvmf_rdma_poll_group *rgroup;
1863 	struct spdk_nvme_cpl		*rsp = &rdma_req->req.rsp->nvme_cpl;
1864 	int				rc;
1865 	struct spdk_nvmf_rdma_recv	*rdma_recv;
1866 	enum spdk_nvmf_rdma_request_state prev_state;
1867 	bool				progress = false;
1868 	int				data_posted;
1869 
1870 	rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
1871 	device = rqpair->port->device;
1872 	rgroup = rqpair->poller->group;
1873 
1874 	assert(rdma_req->state != RDMA_REQUEST_STATE_FREE);
1875 
1876 	/* If the queue pair is in an error state, force the request to the completed state
1877 	 * to release resources. */
1878 	if (rqpair->ibv_state == IBV_QPS_ERR || rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
1879 		if (rdma_req->state == RDMA_REQUEST_STATE_NEED_BUFFER) {
1880 			STAILQ_REMOVE(&rgroup->pending_data_buf_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
1881 		} else if (rdma_req->state == RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING) {
1882 			STAILQ_REMOVE(&rqpair->pending_rdma_read_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
1883 		} else if (rdma_req->state == RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING) {
1884 			STAILQ_REMOVE(&rqpair->pending_rdma_write_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
1885 		}
1886 		rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
1887 	}
1888 
1889 	/* The loop here is to allow for several back-to-back state changes. */
1890 	do {
1891 		prev_state = rdma_req->state;
1892 
1893 		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Request %p entering state %d\n", rdma_req, prev_state);
1894 
1895 		switch (rdma_req->state) {
1896 		case RDMA_REQUEST_STATE_FREE:
1897 			/* Some external code must kick a request into RDMA_REQUEST_STATE_NEW
1898 			 * to escape this state. */
1899 			break;
1900 		case RDMA_REQUEST_STATE_NEW:
1901 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_NEW, 0, 0,
1902 					  (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);
1903 			rdma_recv = rdma_req->recv;
1904 
1905 			/* The first element of the SGL is the NVMe command */
1906 			rdma_req->req.cmd = (union nvmf_h2c_msg *)rdma_recv->sgl[0].addr;
1907 			memset(rdma_req->req.rsp, 0, sizeof(*rdma_req->req.rsp));
1908 
1909 			if (rqpair->ibv_state == IBV_QPS_ERR  || rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
1910 				rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
1911 				break;
1912 			}
1913 
1914 			/* The next state transition depends on the data transfer needs of this request. */
1915 			rdma_req->req.xfer = spdk_nvmf_rdma_request_get_xfer(rdma_req);
1916 
1917 			/* If no data to transfer, ready to execute. */
1918 			if (rdma_req->req.xfer == SPDK_NVME_DATA_NONE) {
1919 				rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
1920 				break;
1921 			}
1922 
1923 			rdma_req->state = RDMA_REQUEST_STATE_NEED_BUFFER;
1924 			STAILQ_INSERT_TAIL(&rgroup->pending_data_buf_queue, rdma_req, state_link);
1925 			break;
1926 		case RDMA_REQUEST_STATE_NEED_BUFFER:
1927 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_NEED_BUFFER, 0, 0,
1928 					  (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);
1929 
1930 			assert(rdma_req->req.xfer != SPDK_NVME_DATA_NONE);
1931 
1932 			if (rdma_req != STAILQ_FIRST(&rgroup->pending_data_buf_queue)) {
1933 				/* This request needs to wait in line to obtain a buffer */
1934 				break;
1935 			}
1936 
1937 			/* Try to get a data buffer */
1938 			rc = spdk_nvmf_rdma_request_parse_sgl(rtransport, device, rdma_req);
1939 			if (rc < 0) {
1940 				STAILQ_REMOVE_HEAD(&rgroup->pending_data_buf_queue, state_link);
1941 				rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
1942 				rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
1943 				break;
1944 			}
1945 
1946 			if (!rdma_req->req.data) {
1947 				/* No buffers available. */
1948 				break;
1949 			}
1950 
1951 			STAILQ_REMOVE_HEAD(&rgroup->pending_data_buf_queue, state_link);
1952 
1953 			/* If data is transferring from host to controller and the data didn't
1954 			 * arrive using in capsule data, we need to do a transfer from the host.
1955 			 */
1956 			if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER && rdma_req->data_from_pool) {
1957 				STAILQ_INSERT_TAIL(&rqpair->pending_rdma_read_queue, rdma_req, state_link);
1958 				rdma_req->state = RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING;
1959 				break;
1960 			}
1961 
1962 			rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
1963 			break;
1964 		case RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING:
1965 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING, 0, 0,
1966 					  (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);
1967 
1968 			if (rdma_req != STAILQ_FIRST(&rqpair->pending_rdma_read_queue)) {
1969 				/* This request needs to wait in line to perform RDMA */
1970 				break;
1971 			}
1972 			if (rqpair->current_send_depth + rdma_req->num_outstanding_data_wr > rqpair->max_send_depth
1973 			    || rqpair->current_read_depth + rdma_req->num_outstanding_data_wr > rqpair->max_read_depth) {
1974 				/* We can only have so many WRs outstanding. we have to wait until some finish. */
1975 				break;
1976 			}
1977 
1978 			/* We have already verified that this request is the head of the queue. */
1979 			STAILQ_REMOVE_HEAD(&rqpair->pending_rdma_read_queue, state_link);
1980 
1981 			rc = request_transfer_in(&rdma_req->req);
1982 			if (!rc) {
1983 				rdma_req->state = RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;
1984 			} else {
1985 				rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
1986 				rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
1987 			}
1988 			break;
1989 		case RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER:
1990 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER, 0, 0,
1991 					  (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);
1992 			/* Some external code must kick a request into RDMA_REQUEST_STATE_READY_TO_EXECUTE
1993 			 * to escape this state. */
1994 			break;
1995 		case RDMA_REQUEST_STATE_READY_TO_EXECUTE:
1996 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_READY_TO_EXECUTE, 0, 0,
1997 					  (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);
1998 			rdma_req->state = RDMA_REQUEST_STATE_EXECUTING;
1999 			spdk_nvmf_request_exec(&rdma_req->req);
2000 			break;
2001 		case RDMA_REQUEST_STATE_EXECUTING:
2002 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_EXECUTING, 0, 0,
2003 					  (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);
2004 			/* Some external code must kick a request into RDMA_REQUEST_STATE_EXECUTED
2005 			 * to escape this state. */
2006 			break;
2007 		case RDMA_REQUEST_STATE_EXECUTED:
2008 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_EXECUTED, 0, 0,
2009 					  (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);
2010 			if (rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
2011 				STAILQ_INSERT_TAIL(&rqpair->pending_rdma_write_queue, rdma_req, state_link);
2012 				rdma_req->state = RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING;
2013 			} else {
2014 				rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
2015 			}
2016 			break;
2017 		case RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING:
2018 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING, 0, 0,
2019 					  (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);
2020 
2021 			if (rdma_req != STAILQ_FIRST(&rqpair->pending_rdma_write_queue)) {
2022 				/* This request needs to wait in line to perform RDMA */
2023 				break;
2024 			}
2025 			if ((rqpair->current_send_depth + rdma_req->num_outstanding_data_wr + 1) >
2026 			    rqpair->max_send_depth) {
2027 				/* We can only have so many WRs outstanding. we have to wait until some finish.
2028 				 * +1 since each request has an additional wr in the resp. */
2029 				break;
2030 			}
2031 
2032 			/* We have already verified that this request is the head of the queue. */
2033 			STAILQ_REMOVE_HEAD(&rqpair->pending_rdma_write_queue, state_link);
2034 
2035 			/* The data transfer will be kicked off from
2036 			 * RDMA_REQUEST_STATE_READY_TO_COMPLETE state.
2037 			 */
2038 			rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
2039 			break;
2040 		case RDMA_REQUEST_STATE_READY_TO_COMPLETE:
2041 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_READY_TO_COMPLETE, 0, 0,
2042 					  (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);
2043 			rc = request_transfer_out(&rdma_req->req, &data_posted);
2044 			assert(rc == 0); /* No good way to handle this currently */
2045 			if (rc) {
2046 				rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
2047 			} else {
2048 				rdma_req->state = data_posted ? RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST :
2049 						  RDMA_REQUEST_STATE_COMPLETING;
2050 			}
2051 			break;
2052 		case RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST:
2053 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST, 0, 0,
2054 					  (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);
2055 			/* Some external code must kick a request into RDMA_REQUEST_STATE_COMPLETED
2056 			 * to escape this state. */
2057 			break;
2058 		case RDMA_REQUEST_STATE_COMPLETING:
2059 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_COMPLETING, 0, 0,
2060 					  (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);
2061 			/* Some external code must kick a request into RDMA_REQUEST_STATE_COMPLETED
2062 			 * to escape this state. */
2063 			break;
2064 		case RDMA_REQUEST_STATE_COMPLETED:
2065 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_COMPLETED, 0, 0,
2066 					  (uintptr_t)rdma_req, (uintptr_t)rqpair->cm_id);
2067 
2068 			nvmf_rdma_request_free(rdma_req, rtransport);
2069 			break;
2070 		case RDMA_REQUEST_NUM_STATES:
2071 		default:
2072 			assert(0);
2073 			break;
2074 		}
2075 
2076 		if (rdma_req->state != prev_state) {
2077 			progress = true;
2078 		}
2079 	} while (rdma_req->state != prev_state);
2080 
2081 	return progress;
2082 }
2083 
2084 /* Public API callbacks begin here */
2085 
2086 #define SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH 128
2087 #define SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH 128
2088 #define SPDK_NVMF_RDMA_DEFAULT_SRQ_DEPTH 4096
2089 #define SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR 128
2090 #define SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE 4096
2091 #define SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE 131072
2092 #define SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE (SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE / SPDK_NVMF_MAX_SGL_ENTRIES)
2093 #define SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS 4095
2094 #define SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE 32
2095 #define SPDK_NVMF_RDMA_DEFAULT_NO_SRQ false;
2096 
2097 static void
2098 spdk_nvmf_rdma_opts_init(struct spdk_nvmf_transport_opts *opts)
2099 {
2100 	opts->max_queue_depth =		SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH;
2101 	opts->max_qpairs_per_ctrlr =	SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR;
2102 	opts->in_capsule_data_size =	SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE;
2103 	opts->max_io_size =		SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE;
2104 	opts->io_unit_size =		SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE;
2105 	opts->max_aq_depth =		SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH;
2106 	opts->num_shared_buffers =	SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS;
2107 	opts->buf_cache_size =		SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE;
2108 	opts->max_srq_depth =		SPDK_NVMF_RDMA_DEFAULT_SRQ_DEPTH;
2109 	opts->no_srq =			SPDK_NVMF_RDMA_DEFAULT_NO_SRQ
2110 }
2111 
2112 const struct spdk_mem_map_ops g_nvmf_rdma_map_ops = {
2113 	.notify_cb = spdk_nvmf_rdma_mem_notify,
2114 	.are_contiguous = spdk_nvmf_rdma_check_contiguous_entries
2115 };
2116 
2117 static int spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport);
2118 
2119 static struct spdk_nvmf_transport *
2120 spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
2121 {
2122 	int rc;
2123 	struct spdk_nvmf_rdma_transport *rtransport;
2124 	struct spdk_nvmf_rdma_device	*device, *tmp;
2125 	struct ibv_pd			*pd;
2126 	struct ibv_context		**contexts;
2127 	uint32_t			i;
2128 	int				flag;
2129 	uint32_t			sge_count;
2130 	uint32_t			min_shared_buffers;
2131 	int				max_device_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
2132 
2133 	rtransport = calloc(1, sizeof(*rtransport));
2134 	if (!rtransport) {
2135 		return NULL;
2136 	}
2137 
2138 	if (pthread_mutex_init(&rtransport->lock, NULL)) {
2139 		SPDK_ERRLOG("pthread_mutex_init() failed\n");
2140 		free(rtransport);
2141 		return NULL;
2142 	}
2143 
2144 	TAILQ_INIT(&rtransport->devices);
2145 	TAILQ_INIT(&rtransport->ports);
2146 
2147 	rtransport->transport.ops = &spdk_nvmf_transport_rdma;
2148 
2149 	SPDK_INFOLOG(SPDK_LOG_RDMA, "*** RDMA Transport Init ***\n"
2150 		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
2151 		     "  max_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
2152 		     "  in_capsule_data_size=%d, max_aq_depth=%d,\n"
2153 		     "  num_shared_buffers=%d, max_srq_depth=%d, no_srq=%d\n",
2154 		     opts->max_queue_depth,
2155 		     opts->max_io_size,
2156 		     opts->max_qpairs_per_ctrlr,
2157 		     opts->io_unit_size,
2158 		     opts->in_capsule_data_size,
2159 		     opts->max_aq_depth,
2160 		     opts->num_shared_buffers,
2161 		     opts->max_srq_depth,
2162 		     opts->no_srq);
2163 
2164 	/* I/O unit size cannot be larger than max I/O size */
2165 	if (opts->io_unit_size > opts->max_io_size) {
2166 		opts->io_unit_size = opts->max_io_size;
2167 	}
2168 
2169 	if (opts->num_shared_buffers < (SPDK_NVMF_MAX_SGL_ENTRIES * 2)) {
2170 		SPDK_ERRLOG("The number of shared data buffers (%d) is less than"
2171 			    "the minimum number required to guarantee that forward progress can be made (%d)\n",
2172 			    opts->num_shared_buffers, (SPDK_NVMF_MAX_SGL_ENTRIES * 2));
2173 		spdk_nvmf_rdma_destroy(&rtransport->transport);
2174 		return NULL;
2175 	}
2176 
2177 	min_shared_buffers = spdk_thread_get_count() * opts->buf_cache_size;
2178 	if (min_shared_buffers > opts->num_shared_buffers) {
2179 		SPDK_ERRLOG("There are not enough buffers to satisfy"
2180 			    "per-poll group caches for each thread. (%" PRIu32 ")"
2181 			    "supplied. (%" PRIu32 ") required\n", opts->num_shared_buffers, min_shared_buffers);
2182 		SPDK_ERRLOG("Please specify a larger number of shared buffers\n");
2183 		spdk_nvmf_rdma_destroy(&rtransport->transport);
2184 		return NULL;
2185 	}
2186 
2187 	sge_count = opts->max_io_size / opts->io_unit_size;
2188 	if (sge_count > NVMF_DEFAULT_TX_SGE) {
2189 		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
2190 		spdk_nvmf_rdma_destroy(&rtransport->transport);
2191 		return NULL;
2192 	}
2193 
2194 	rtransport->event_channel = rdma_create_event_channel();
2195 	if (rtransport->event_channel == NULL) {
2196 		SPDK_ERRLOG("rdma_create_event_channel() failed, %s\n", spdk_strerror(errno));
2197 		spdk_nvmf_rdma_destroy(&rtransport->transport);
2198 		return NULL;
2199 	}
2200 
2201 	flag = fcntl(rtransport->event_channel->fd, F_GETFL);
2202 	if (fcntl(rtransport->event_channel->fd, F_SETFL, flag | O_NONBLOCK) < 0) {
2203 		SPDK_ERRLOG("fcntl can't set nonblocking mode for socket, fd: %d (%s)\n",
2204 			    rtransport->event_channel->fd, spdk_strerror(errno));
2205 		spdk_nvmf_rdma_destroy(&rtransport->transport);
2206 		return NULL;
2207 	}
2208 
2209 	rtransport->data_wr_pool = spdk_mempool_create("spdk_nvmf_rdma_wr_data",
2210 				   opts->max_queue_depth * SPDK_NVMF_MAX_SGL_ENTRIES,
2211 				   sizeof(struct spdk_nvmf_rdma_request_data),
2212 				   SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
2213 				   SPDK_ENV_SOCKET_ID_ANY);
2214 	if (!rtransport->data_wr_pool) {
2215 		SPDK_ERRLOG("Unable to allocate work request pool for poll group\n");
2216 		spdk_nvmf_rdma_destroy(&rtransport->transport);
2217 		return NULL;
2218 	}
2219 
2220 	contexts = rdma_get_devices(NULL);
2221 	if (contexts == NULL) {
2222 		SPDK_ERRLOG("rdma_get_devices() failed: %s (%d)\n", spdk_strerror(errno), errno);
2223 		spdk_nvmf_rdma_destroy(&rtransport->transport);
2224 		return NULL;
2225 	}
2226 
2227 	i = 0;
2228 	rc = 0;
2229 	while (contexts[i] != NULL) {
2230 		device = calloc(1, sizeof(*device));
2231 		if (!device) {
2232 			SPDK_ERRLOG("Unable to allocate memory for RDMA devices.\n");
2233 			rc = -ENOMEM;
2234 			break;
2235 		}
2236 		device->context = contexts[i];
2237 		rc = ibv_query_device(device->context, &device->attr);
2238 		if (rc < 0) {
2239 			SPDK_ERRLOG("Failed to query RDMA device attributes.\n");
2240 			free(device);
2241 			break;
2242 
2243 		}
2244 
2245 		max_device_sge = spdk_min(max_device_sge, device->attr.max_sge);
2246 
2247 #ifdef SPDK_CONFIG_RDMA_SEND_WITH_INVAL
2248 		if ((device->attr.device_cap_flags & IBV_DEVICE_MEM_MGT_EXTENSIONS) == 0) {
2249 			SPDK_WARNLOG("The libibverbs on this system supports SEND_WITH_INVALIDATE,");
2250 			SPDK_WARNLOG("but the device with vendor ID %u does not.\n", device->attr.vendor_id);
2251 		}
2252 
2253 		/**
2254 		 * The vendor ID is assigned by the IEEE and an ID of 0 implies Soft-RoCE.
2255 		 * The Soft-RoCE RXE driver does not currently support send with invalidate,
2256 		 * but incorrectly reports that it does. There are changes making their way
2257 		 * through the kernel now that will enable this feature. When they are merged,
2258 		 * we can conditionally enable this feature.
2259 		 *
2260 		 * TODO: enable this for versions of the kernel rxe driver that support it.
2261 		 */
2262 		if (device->attr.vendor_id == 0) {
2263 			device->attr.device_cap_flags &= ~(IBV_DEVICE_MEM_MGT_EXTENSIONS);
2264 		}
2265 #endif
2266 
2267 		/* set up device context async ev fd as NON_BLOCKING */
2268 		flag = fcntl(device->context->async_fd, F_GETFL);
2269 		rc = fcntl(device->context->async_fd, F_SETFL, flag | O_NONBLOCK);
2270 		if (rc < 0) {
2271 			SPDK_ERRLOG("Failed to set context async fd to NONBLOCK.\n");
2272 			free(device);
2273 			break;
2274 		}
2275 
2276 		TAILQ_INSERT_TAIL(&rtransport->devices, device, link);
2277 		i++;
2278 
2279 		pd = NULL;
2280 		if (g_nvmf_hooks.get_ibv_pd) {
2281 			pd = g_nvmf_hooks.get_ibv_pd(NULL, device->context);
2282 		}
2283 
2284 		if (!g_nvmf_hooks.get_ibv_pd) {
2285 			device->pd = ibv_alloc_pd(device->context);
2286 			if (!device->pd) {
2287 				SPDK_ERRLOG("Unable to allocate protection domain.\n");
2288 				spdk_nvmf_rdma_destroy(&rtransport->transport);
2289 				return NULL;
2290 			}
2291 		} else {
2292 			device->pd = pd;
2293 		}
2294 
2295 		assert(device->map == NULL);
2296 
2297 		device->map = spdk_mem_map_alloc(0, &g_nvmf_rdma_map_ops, device->pd);
2298 		if (!device->map) {
2299 			SPDK_ERRLOG("Unable to allocate memory map for listen address\n");
2300 			spdk_nvmf_rdma_destroy(&rtransport->transport);
2301 			return NULL;
2302 		}
2303 
2304 		assert(device->map != NULL);
2305 		assert(device->pd != NULL);
2306 	}
2307 	rdma_free_devices(contexts);
2308 
2309 	if (opts->io_unit_size * max_device_sge < opts->max_io_size) {
2310 		/* divide and round up. */
2311 		opts->io_unit_size = (opts->max_io_size + max_device_sge - 1) / max_device_sge;
2312 
2313 		/* round up to the nearest 4k. */
2314 		opts->io_unit_size = (opts->io_unit_size + NVMF_DATA_BUFFER_ALIGNMENT - 1) & ~NVMF_DATA_BUFFER_MASK;
2315 
2316 		opts->io_unit_size = spdk_max(opts->io_unit_size, SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE);
2317 		SPDK_NOTICELOG("Adjusting the io unit size to fit the device's maximum I/O size. New I/O unit size %u\n",
2318 			       opts->io_unit_size);
2319 	}
2320 
2321 	if (rc < 0) {
2322 		spdk_nvmf_rdma_destroy(&rtransport->transport);
2323 		return NULL;
2324 	}
2325 
2326 	/* Set up poll descriptor array to monitor events from RDMA and IB
2327 	 * in a single poll syscall
2328 	 */
2329 	rtransport->npoll_fds = i + 1;
2330 	i = 0;
2331 	rtransport->poll_fds = calloc(rtransport->npoll_fds, sizeof(struct pollfd));
2332 	if (rtransport->poll_fds == NULL) {
2333 		SPDK_ERRLOG("poll_fds allocation failed\n");
2334 		spdk_nvmf_rdma_destroy(&rtransport->transport);
2335 		return NULL;
2336 	}
2337 
2338 	rtransport->poll_fds[i].fd = rtransport->event_channel->fd;
2339 	rtransport->poll_fds[i++].events = POLLIN;
2340 
2341 	TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) {
2342 		rtransport->poll_fds[i].fd = device->context->async_fd;
2343 		rtransport->poll_fds[i++].events = POLLIN;
2344 	}
2345 
2346 	return &rtransport->transport;
2347 }
2348 
2349 static int
2350 spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport)
2351 {
2352 	struct spdk_nvmf_rdma_transport	*rtransport;
2353 	struct spdk_nvmf_rdma_port	*port, *port_tmp;
2354 	struct spdk_nvmf_rdma_device	*device, *device_tmp;
2355 
2356 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
2357 
2358 	TAILQ_FOREACH_SAFE(port, &rtransport->ports, link, port_tmp) {
2359 		TAILQ_REMOVE(&rtransport->ports, port, link);
2360 		rdma_destroy_id(port->id);
2361 		free(port);
2362 	}
2363 
2364 	if (rtransport->poll_fds != NULL) {
2365 		free(rtransport->poll_fds);
2366 	}
2367 
2368 	if (rtransport->event_channel != NULL) {
2369 		rdma_destroy_event_channel(rtransport->event_channel);
2370 	}
2371 
2372 	TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, device_tmp) {
2373 		TAILQ_REMOVE(&rtransport->devices, device, link);
2374 		if (device->map) {
2375 			spdk_mem_map_free(&device->map);
2376 		}
2377 		if (device->pd) {
2378 			if (!g_nvmf_hooks.get_ibv_pd) {
2379 				ibv_dealloc_pd(device->pd);
2380 			}
2381 		}
2382 		free(device);
2383 	}
2384 
2385 	if (rtransport->data_wr_pool != NULL) {
2386 		if (spdk_mempool_count(rtransport->data_wr_pool) !=
2387 		    (transport->opts.max_queue_depth * SPDK_NVMF_MAX_SGL_ENTRIES)) {
2388 			SPDK_ERRLOG("transport wr pool count is %zu but should be %u\n",
2389 				    spdk_mempool_count(rtransport->data_wr_pool),
2390 				    transport->opts.max_queue_depth * SPDK_NVMF_MAX_SGL_ENTRIES);
2391 		}
2392 	}
2393 
2394 	spdk_mempool_free(rtransport->data_wr_pool);
2395 	pthread_mutex_destroy(&rtransport->lock);
2396 	free(rtransport);
2397 
2398 	return 0;
2399 }
2400 
2401 static int
2402 spdk_nvmf_rdma_trid_from_cm_id(struct rdma_cm_id *id,
2403 			       struct spdk_nvme_transport_id *trid,
2404 			       bool peer);
2405 
2406 static int
2407 spdk_nvmf_rdma_listen(struct spdk_nvmf_transport *transport,
2408 		      const struct spdk_nvme_transport_id *trid)
2409 {
2410 	struct spdk_nvmf_rdma_transport	*rtransport;
2411 	struct spdk_nvmf_rdma_device	*device;
2412 	struct spdk_nvmf_rdma_port	*port_tmp, *port;
2413 	struct addrinfo			*res;
2414 	struct addrinfo			hints;
2415 	int				family;
2416 	int				rc;
2417 
2418 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
2419 
2420 	port = calloc(1, sizeof(*port));
2421 	if (!port) {
2422 		return -ENOMEM;
2423 	}
2424 
2425 	/* Selectively copy the trid. Things like NQN don't matter here - that
2426 	 * mapping is enforced elsewhere.
2427 	 */
2428 	port->trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
2429 	port->trid.adrfam = trid->adrfam;
2430 	snprintf(port->trid.traddr, sizeof(port->trid.traddr), "%s", trid->traddr);
2431 	snprintf(port->trid.trsvcid, sizeof(port->trid.trsvcid), "%s", trid->trsvcid);
2432 
2433 	pthread_mutex_lock(&rtransport->lock);
2434 	assert(rtransport->event_channel != NULL);
2435 	TAILQ_FOREACH(port_tmp, &rtransport->ports, link) {
2436 		if (spdk_nvme_transport_id_compare(&port_tmp->trid, &port->trid) == 0) {
2437 			port_tmp->ref++;
2438 			free(port);
2439 			/* Already listening at this address */
2440 			pthread_mutex_unlock(&rtransport->lock);
2441 			return 0;
2442 		}
2443 	}
2444 
2445 	rc = rdma_create_id(rtransport->event_channel, &port->id, port, RDMA_PS_TCP);
2446 	if (rc < 0) {
2447 		SPDK_ERRLOG("rdma_create_id() failed\n");
2448 		free(port);
2449 		pthread_mutex_unlock(&rtransport->lock);
2450 		return rc;
2451 	}
2452 
2453 	switch (port->trid.adrfam) {
2454 	case SPDK_NVMF_ADRFAM_IPV4:
2455 		family = AF_INET;
2456 		break;
2457 	case SPDK_NVMF_ADRFAM_IPV6:
2458 		family = AF_INET6;
2459 		break;
2460 	default:
2461 		SPDK_ERRLOG("Unhandled ADRFAM %d\n", port->trid.adrfam);
2462 		free(port);
2463 		pthread_mutex_unlock(&rtransport->lock);
2464 		return -EINVAL;
2465 	}
2466 
2467 	memset(&hints, 0, sizeof(hints));
2468 	hints.ai_family = family;
2469 	hints.ai_flags = AI_NUMERICSERV;
2470 	hints.ai_socktype = SOCK_STREAM;
2471 	hints.ai_protocol = 0;
2472 
2473 	rc = getaddrinfo(port->trid.traddr, port->trid.trsvcid, &hints, &res);
2474 	if (rc) {
2475 		SPDK_ERRLOG("getaddrinfo failed: %s (%d)\n", gai_strerror(rc), rc);
2476 		free(port);
2477 		pthread_mutex_unlock(&rtransport->lock);
2478 		return -EINVAL;
2479 	}
2480 
2481 	rc = rdma_bind_addr(port->id, res->ai_addr);
2482 	freeaddrinfo(res);
2483 
2484 	if (rc < 0) {
2485 		SPDK_ERRLOG("rdma_bind_addr() failed\n");
2486 		rdma_destroy_id(port->id);
2487 		free(port);
2488 		pthread_mutex_unlock(&rtransport->lock);
2489 		return rc;
2490 	}
2491 
2492 	if (!port->id->verbs) {
2493 		SPDK_ERRLOG("ibv_context is null\n");
2494 		rdma_destroy_id(port->id);
2495 		free(port);
2496 		pthread_mutex_unlock(&rtransport->lock);
2497 		return -1;
2498 	}
2499 
2500 	rc = rdma_listen(port->id, 10); /* 10 = backlog */
2501 	if (rc < 0) {
2502 		SPDK_ERRLOG("rdma_listen() failed\n");
2503 		rdma_destroy_id(port->id);
2504 		free(port);
2505 		pthread_mutex_unlock(&rtransport->lock);
2506 		return rc;
2507 	}
2508 
2509 	TAILQ_FOREACH(device, &rtransport->devices, link) {
2510 		if (device->context == port->id->verbs) {
2511 			port->device = device;
2512 			break;
2513 		}
2514 	}
2515 	if (!port->device) {
2516 		SPDK_ERRLOG("Accepted a connection with verbs %p, but unable to find a corresponding device.\n",
2517 			    port->id->verbs);
2518 		rdma_destroy_id(port->id);
2519 		free(port);
2520 		pthread_mutex_unlock(&rtransport->lock);
2521 		return -EINVAL;
2522 	}
2523 
2524 	SPDK_INFOLOG(SPDK_LOG_RDMA, "*** NVMf Target Listening on %s port %d ***\n",
2525 		     port->trid.traddr, ntohs(rdma_get_src_port(port->id)));
2526 
2527 	port->ref = 1;
2528 
2529 	TAILQ_INSERT_TAIL(&rtransport->ports, port, link);
2530 	pthread_mutex_unlock(&rtransport->lock);
2531 
2532 	return 0;
2533 }
2534 
2535 static int
2536 spdk_nvmf_rdma_stop_listen(struct spdk_nvmf_transport *transport,
2537 			   const struct spdk_nvme_transport_id *_trid)
2538 {
2539 	struct spdk_nvmf_rdma_transport *rtransport;
2540 	struct spdk_nvmf_rdma_port *port, *tmp;
2541 	struct spdk_nvme_transport_id trid = {};
2542 
2543 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
2544 
2545 	/* Selectively copy the trid. Things like NQN don't matter here - that
2546 	 * mapping is enforced elsewhere.
2547 	 */
2548 	trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
2549 	trid.adrfam = _trid->adrfam;
2550 	snprintf(trid.traddr, sizeof(port->trid.traddr), "%s", _trid->traddr);
2551 	snprintf(trid.trsvcid, sizeof(port->trid.trsvcid), "%s", _trid->trsvcid);
2552 
2553 	pthread_mutex_lock(&rtransport->lock);
2554 	TAILQ_FOREACH_SAFE(port, &rtransport->ports, link, tmp) {
2555 		if (spdk_nvme_transport_id_compare(&port->trid, &trid) == 0) {
2556 			assert(port->ref > 0);
2557 			port->ref--;
2558 			if (port->ref == 0) {
2559 				TAILQ_REMOVE(&rtransport->ports, port, link);
2560 				rdma_destroy_id(port->id);
2561 				free(port);
2562 			}
2563 			break;
2564 		}
2565 	}
2566 
2567 	pthread_mutex_unlock(&rtransport->lock);
2568 	return 0;
2569 }
2570 
2571 static void
2572 spdk_nvmf_rdma_qpair_process_pending(struct spdk_nvmf_rdma_transport *rtransport,
2573 				     struct spdk_nvmf_rdma_qpair *rqpair, bool drain)
2574 {
2575 	struct spdk_nvmf_rdma_request	*rdma_req, *req_tmp;
2576 	struct spdk_nvmf_rdma_resources *resources;
2577 
2578 	/* We process I/O in the data transfer pending queue at the highest priority. RDMA reads first */
2579 	STAILQ_FOREACH_SAFE(rdma_req, &rqpair->pending_rdma_read_queue, state_link, req_tmp) {
2580 		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
2581 			break;
2582 		}
2583 	}
2584 
2585 	/* Then RDMA writes since reads have stronger restrictions than writes */
2586 	STAILQ_FOREACH_SAFE(rdma_req, &rqpair->pending_rdma_write_queue, state_link, req_tmp) {
2587 		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
2588 			break;
2589 		}
2590 	}
2591 
2592 	/* The second highest priority is I/O waiting on memory buffers. */
2593 	STAILQ_FOREACH_SAFE(rdma_req, &rqpair->poller->group->pending_data_buf_queue, state_link,
2594 			    req_tmp) {
2595 		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
2596 			break;
2597 		}
2598 	}
2599 
2600 	resources = rqpair->resources;
2601 	while (!STAILQ_EMPTY(&resources->free_queue) && !STAILQ_EMPTY(&resources->incoming_queue)) {
2602 		rdma_req = STAILQ_FIRST(&resources->free_queue);
2603 		STAILQ_REMOVE_HEAD(&resources->free_queue, state_link);
2604 		rdma_req->recv = STAILQ_FIRST(&resources->incoming_queue);
2605 		STAILQ_REMOVE_HEAD(&resources->incoming_queue, link);
2606 
2607 		if (rqpair->srq != NULL) {
2608 			rdma_req->req.qpair = &rdma_req->recv->qpair->qpair;
2609 			rdma_req->recv->qpair->qd++;
2610 		} else {
2611 			rqpair->qd++;
2612 		}
2613 
2614 		rdma_req->state = RDMA_REQUEST_STATE_NEW;
2615 		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false) {
2616 			break;
2617 		}
2618 	}
2619 }
2620 
2621 static void
2622 _nvmf_rdma_qpair_disconnect(void *ctx)
2623 {
2624 	struct spdk_nvmf_qpair *qpair = ctx;
2625 
2626 	spdk_nvmf_qpair_disconnect(qpair, NULL, NULL);
2627 }
2628 
2629 static void
2630 _nvmf_rdma_try_disconnect(void *ctx)
2631 {
2632 	struct spdk_nvmf_qpair *qpair = ctx;
2633 	struct spdk_nvmf_poll_group *group;
2634 
2635 	/* Read the group out of the qpair. This is normally set and accessed only from
2636 	 * the thread that created the group. Here, we're not on that thread necessarily.
2637 	 * The data member qpair->group begins it's life as NULL and then is assigned to
2638 	 * a pointer and never changes. So fortunately reading this and checking for
2639 	 * non-NULL is thread safe in the x86_64 memory model. */
2640 	group = qpair->group;
2641 
2642 	if (group == NULL) {
2643 		/* The qpair hasn't been assigned to a group yet, so we can't
2644 		 * process a disconnect. Send a message to ourself and try again. */
2645 		spdk_thread_send_msg(spdk_get_thread(), _nvmf_rdma_try_disconnect, qpair);
2646 		return;
2647 	}
2648 
2649 	spdk_thread_send_msg(group->thread, _nvmf_rdma_qpair_disconnect, qpair);
2650 }
2651 
2652 static inline void
2653 spdk_nvmf_rdma_start_disconnect(struct spdk_nvmf_rdma_qpair *rqpair)
2654 {
2655 	if (__sync_bool_compare_and_swap(&rqpair->disconnect_started, false, true)) {
2656 		_nvmf_rdma_try_disconnect(&rqpair->qpair);
2657 	}
2658 }
2659 
2660 static void nvmf_rdma_destroy_drained_qpair(void *ctx)
2661 {
2662 	struct spdk_nvmf_rdma_qpair *rqpair = ctx;
2663 	struct spdk_nvmf_rdma_transport *rtransport = SPDK_CONTAINEROF(rqpair->qpair.transport,
2664 			struct spdk_nvmf_rdma_transport, transport);
2665 
2666 	/* In non SRQ path, we will reach rqpair->max_queue_depth. In SRQ path, we will get the last_wqe event. */
2667 	if (rqpair->current_send_depth != 0) {
2668 		return;
2669 	}
2670 
2671 	if (rqpair->srq == NULL && rqpair->current_recv_depth != rqpair->max_queue_depth) {
2672 		return;
2673 	}
2674 
2675 	if (rqpair->srq != NULL && rqpair->last_wqe_reached == false) {
2676 		return;
2677 	}
2678 
2679 	spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, true);
2680 	spdk_nvmf_rdma_qpair_destroy(rqpair);
2681 }
2682 
2683 
2684 static int
2685 nvmf_rdma_disconnect(struct rdma_cm_event *evt)
2686 {
2687 	struct spdk_nvmf_qpair		*qpair;
2688 	struct spdk_nvmf_rdma_qpair	*rqpair;
2689 
2690 	if (evt->id == NULL) {
2691 		SPDK_ERRLOG("disconnect request: missing cm_id\n");
2692 		return -1;
2693 	}
2694 
2695 	qpair = evt->id->context;
2696 	if (qpair == NULL) {
2697 		SPDK_ERRLOG("disconnect request: no active connection\n");
2698 		return -1;
2699 	}
2700 
2701 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
2702 
2703 	spdk_trace_record(TRACE_RDMA_QP_DISCONNECT, 0, 0, (uintptr_t)rqpair->cm_id, 0);
2704 
2705 	spdk_nvmf_rdma_update_ibv_state(rqpair);
2706 
2707 	spdk_nvmf_rdma_start_disconnect(rqpair);
2708 
2709 	return 0;
2710 }
2711 
2712 #ifdef DEBUG
2713 static const char *CM_EVENT_STR[] = {
2714 	"RDMA_CM_EVENT_ADDR_RESOLVED",
2715 	"RDMA_CM_EVENT_ADDR_ERROR",
2716 	"RDMA_CM_EVENT_ROUTE_RESOLVED",
2717 	"RDMA_CM_EVENT_ROUTE_ERROR",
2718 	"RDMA_CM_EVENT_CONNECT_REQUEST",
2719 	"RDMA_CM_EVENT_CONNECT_RESPONSE",
2720 	"RDMA_CM_EVENT_CONNECT_ERROR",
2721 	"RDMA_CM_EVENT_UNREACHABLE",
2722 	"RDMA_CM_EVENT_REJECTED",
2723 	"RDMA_CM_EVENT_ESTABLISHED",
2724 	"RDMA_CM_EVENT_DISCONNECTED",
2725 	"RDMA_CM_EVENT_DEVICE_REMOVAL",
2726 	"RDMA_CM_EVENT_MULTICAST_JOIN",
2727 	"RDMA_CM_EVENT_MULTICAST_ERROR",
2728 	"RDMA_CM_EVENT_ADDR_CHANGE",
2729 	"RDMA_CM_EVENT_TIMEWAIT_EXIT"
2730 };
2731 #endif /* DEBUG */
2732 
2733 static void
2734 spdk_nvmf_process_cm_event(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn)
2735 {
2736 	struct spdk_nvmf_rdma_transport *rtransport;
2737 	struct rdma_cm_event		*event;
2738 	int				rc;
2739 
2740 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
2741 
2742 	if (rtransport->event_channel == NULL) {
2743 		return;
2744 	}
2745 
2746 	while (1) {
2747 		rc = rdma_get_cm_event(rtransport->event_channel, &event);
2748 		if (rc == 0) {
2749 			SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Acceptor Event: %s\n", CM_EVENT_STR[event->event]);
2750 
2751 			spdk_trace_record(TRACE_RDMA_CM_ASYNC_EVENT, 0, 0, 0, event->event);
2752 
2753 			switch (event->event) {
2754 			case RDMA_CM_EVENT_ADDR_RESOLVED:
2755 			case RDMA_CM_EVENT_ADDR_ERROR:
2756 			case RDMA_CM_EVENT_ROUTE_RESOLVED:
2757 			case RDMA_CM_EVENT_ROUTE_ERROR:
2758 				/* No action required. The target never attempts to resolve routes. */
2759 				break;
2760 			case RDMA_CM_EVENT_CONNECT_REQUEST:
2761 				rc = nvmf_rdma_connect(transport, event, cb_fn);
2762 				if (rc < 0) {
2763 					SPDK_ERRLOG("Unable to process connect event. rc: %d\n", rc);
2764 					break;
2765 				}
2766 				break;
2767 			case RDMA_CM_EVENT_CONNECT_RESPONSE:
2768 				/* The target never initiates a new connection. So this will not occur. */
2769 				break;
2770 			case RDMA_CM_EVENT_CONNECT_ERROR:
2771 				/* Can this happen? The docs say it can, but not sure what causes it. */
2772 				break;
2773 			case RDMA_CM_EVENT_UNREACHABLE:
2774 			case RDMA_CM_EVENT_REJECTED:
2775 				/* These only occur on the client side. */
2776 				break;
2777 			case RDMA_CM_EVENT_ESTABLISHED:
2778 				/* TODO: Should we be waiting for this event anywhere? */
2779 				break;
2780 			case RDMA_CM_EVENT_DISCONNECTED:
2781 			case RDMA_CM_EVENT_DEVICE_REMOVAL:
2782 				rc = nvmf_rdma_disconnect(event);
2783 				if (rc < 0) {
2784 					SPDK_ERRLOG("Unable to process disconnect event. rc: %d\n", rc);
2785 					break;
2786 				}
2787 				break;
2788 			case RDMA_CM_EVENT_MULTICAST_JOIN:
2789 			case RDMA_CM_EVENT_MULTICAST_ERROR:
2790 				/* Multicast is not used */
2791 				break;
2792 			case RDMA_CM_EVENT_ADDR_CHANGE:
2793 				/* Not utilizing this event */
2794 				break;
2795 			case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2796 				/* For now, do nothing. The target never re-uses queue pairs. */
2797 				break;
2798 			default:
2799 				SPDK_ERRLOG("Unexpected Acceptor Event [%d]\n", event->event);
2800 				break;
2801 			}
2802 
2803 			rdma_ack_cm_event(event);
2804 		} else {
2805 			if (errno != EAGAIN && errno != EWOULDBLOCK) {
2806 				SPDK_ERRLOG("Acceptor Event Error: %s\n", spdk_strerror(errno));
2807 			}
2808 			break;
2809 		}
2810 	}
2811 }
2812 
2813 static void
2814 spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
2815 {
2816 	int				rc;
2817 	struct spdk_nvmf_rdma_qpair	*rqpair = NULL;
2818 	struct ibv_async_event		event;
2819 	enum ibv_qp_state		state;
2820 
2821 	rc = ibv_get_async_event(device->context, &event);
2822 
2823 	if (rc) {
2824 		SPDK_ERRLOG("Failed to get async_event (%d): %s\n",
2825 			    errno, spdk_strerror(errno));
2826 		return;
2827 	}
2828 
2829 	switch (event.event_type) {
2830 	case IBV_EVENT_QP_FATAL:
2831 		rqpair = event.element.qp->qp_context;
2832 		SPDK_ERRLOG("Fatal event received for rqpair %p\n", rqpair);
2833 		spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
2834 				  (uintptr_t)rqpair->cm_id, event.event_type);
2835 		spdk_nvmf_rdma_update_ibv_state(rqpair);
2836 		spdk_nvmf_rdma_start_disconnect(rqpair);
2837 		break;
2838 	case IBV_EVENT_QP_LAST_WQE_REACHED:
2839 		/* This event only occurs for shared receive queues. */
2840 		rqpair = event.element.qp->qp_context;
2841 		rqpair->last_wqe_reached = true;
2842 
2843 		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Last WQE reached event received for rqpair %p\n", rqpair);
2844 		/* This must be handled on the polling thread if it exists. Otherwise the timeout will catch it. */
2845 		if (rqpair->qpair.group) {
2846 			spdk_thread_send_msg(rqpair->qpair.group->thread, nvmf_rdma_destroy_drained_qpair, rqpair);
2847 		} else {
2848 			SPDK_ERRLOG("Unable to destroy the qpair %p since it does not have a poll group.\n", rqpair);
2849 		}
2850 
2851 		break;
2852 	case IBV_EVENT_SQ_DRAINED:
2853 		/* This event occurs frequently in both error and non-error states.
2854 		 * Check if the qpair is in an error state before sending a message.
2855 		 * Note that we're not on the correct thread to access the qpair, but
2856 		 * the operations that the below calls make all happen to be thread
2857 		 * safe. */
2858 		rqpair = event.element.qp->qp_context;
2859 		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Last sq drained event received for rqpair %p\n", rqpair);
2860 		spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
2861 				  (uintptr_t)rqpair->cm_id, event.event_type);
2862 		state = spdk_nvmf_rdma_update_ibv_state(rqpair);
2863 		if (state == IBV_QPS_ERR) {
2864 			spdk_nvmf_rdma_start_disconnect(rqpair);
2865 		}
2866 		break;
2867 	case IBV_EVENT_QP_REQ_ERR:
2868 	case IBV_EVENT_QP_ACCESS_ERR:
2869 	case IBV_EVENT_COMM_EST:
2870 	case IBV_EVENT_PATH_MIG:
2871 	case IBV_EVENT_PATH_MIG_ERR:
2872 		SPDK_NOTICELOG("Async event: %s\n",
2873 			       ibv_event_type_str(event.event_type));
2874 		rqpair = event.element.qp->qp_context;
2875 		spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
2876 				  (uintptr_t)rqpair->cm_id, event.event_type);
2877 		spdk_nvmf_rdma_update_ibv_state(rqpair);
2878 		break;
2879 	case IBV_EVENT_CQ_ERR:
2880 	case IBV_EVENT_DEVICE_FATAL:
2881 	case IBV_EVENT_PORT_ACTIVE:
2882 	case IBV_EVENT_PORT_ERR:
2883 	case IBV_EVENT_LID_CHANGE:
2884 	case IBV_EVENT_PKEY_CHANGE:
2885 	case IBV_EVENT_SM_CHANGE:
2886 	case IBV_EVENT_SRQ_ERR:
2887 	case IBV_EVENT_SRQ_LIMIT_REACHED:
2888 	case IBV_EVENT_CLIENT_REREGISTER:
2889 	case IBV_EVENT_GID_CHANGE:
2890 	default:
2891 		SPDK_NOTICELOG("Async event: %s\n",
2892 			       ibv_event_type_str(event.event_type));
2893 		spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0, 0, event.event_type);
2894 		break;
2895 	}
2896 	ibv_ack_async_event(&event);
2897 }
2898 
2899 static void
2900 spdk_nvmf_rdma_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn)
2901 {
2902 	int	nfds, i = 0;
2903 	struct spdk_nvmf_rdma_transport *rtransport;
2904 	struct spdk_nvmf_rdma_device *device, *tmp;
2905 
2906 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
2907 	nfds = poll(rtransport->poll_fds, rtransport->npoll_fds, 0);
2908 
2909 	if (nfds <= 0) {
2910 		return;
2911 	}
2912 
2913 	/* The first poll descriptor is RDMA CM event */
2914 	if (rtransport->poll_fds[i++].revents & POLLIN) {
2915 		spdk_nvmf_process_cm_event(transport, cb_fn);
2916 		nfds--;
2917 	}
2918 
2919 	if (nfds == 0) {
2920 		return;
2921 	}
2922 
2923 	/* Second and subsequent poll descriptors are IB async events */
2924 	TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) {
2925 		if (rtransport->poll_fds[i++].revents & POLLIN) {
2926 			spdk_nvmf_process_ib_event(device);
2927 			nfds--;
2928 		}
2929 	}
2930 	/* check all flagged fd's have been served */
2931 	assert(nfds == 0);
2932 }
2933 
2934 static void
2935 spdk_nvmf_rdma_discover(struct spdk_nvmf_transport *transport,
2936 			struct spdk_nvme_transport_id *trid,
2937 			struct spdk_nvmf_discovery_log_page_entry *entry)
2938 {
2939 	entry->trtype = SPDK_NVMF_TRTYPE_RDMA;
2940 	entry->adrfam = trid->adrfam;
2941 	entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED;
2942 
2943 	spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' ');
2944 	spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' ');
2945 
2946 	entry->tsas.rdma.rdma_qptype = SPDK_NVMF_RDMA_QPTYPE_RELIABLE_CONNECTED;
2947 	entry->tsas.rdma.rdma_prtype = SPDK_NVMF_RDMA_PRTYPE_NONE;
2948 	entry->tsas.rdma.rdma_cms = SPDK_NVMF_RDMA_CMS_RDMA_CM;
2949 }
2950 
2951 static void
2952 spdk_nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group);
2953 
2954 static struct spdk_nvmf_transport_poll_group *
2955 spdk_nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
2956 {
2957 	struct spdk_nvmf_rdma_transport		*rtransport;
2958 	struct spdk_nvmf_rdma_poll_group	*rgroup;
2959 	struct spdk_nvmf_rdma_poller		*poller;
2960 	struct spdk_nvmf_rdma_device		*device;
2961 	struct ibv_srq_init_attr		srq_init_attr;
2962 	struct spdk_nvmf_rdma_resource_opts	opts;
2963 	int					num_cqe;
2964 
2965 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
2966 
2967 	rgroup = calloc(1, sizeof(*rgroup));
2968 	if (!rgroup) {
2969 		return NULL;
2970 	}
2971 
2972 	TAILQ_INIT(&rgroup->pollers);
2973 	STAILQ_INIT(&rgroup->pending_data_buf_queue);
2974 
2975 	pthread_mutex_lock(&rtransport->lock);
2976 	TAILQ_FOREACH(device, &rtransport->devices, link) {
2977 		poller = calloc(1, sizeof(*poller));
2978 		if (!poller) {
2979 			SPDK_ERRLOG("Unable to allocate memory for new RDMA poller\n");
2980 			spdk_nvmf_rdma_poll_group_destroy(&rgroup->group);
2981 			pthread_mutex_unlock(&rtransport->lock);
2982 			return NULL;
2983 		}
2984 
2985 		poller->device = device;
2986 		poller->group = rgroup;
2987 
2988 		TAILQ_INIT(&poller->qpairs);
2989 		STAILQ_INIT(&poller->qpairs_pending_send);
2990 		STAILQ_INIT(&poller->qpairs_pending_recv);
2991 
2992 		TAILQ_INSERT_TAIL(&rgroup->pollers, poller, link);
2993 		if (transport->opts.no_srq == false && device->num_srq < device->attr.max_srq) {
2994 			poller->max_srq_depth = transport->opts.max_srq_depth;
2995 
2996 			device->num_srq++;
2997 			memset(&srq_init_attr, 0, sizeof(struct ibv_srq_init_attr));
2998 			srq_init_attr.attr.max_wr = poller->max_srq_depth;
2999 			srq_init_attr.attr.max_sge = spdk_min(device->attr.max_sge, NVMF_DEFAULT_RX_SGE);
3000 			poller->srq = ibv_create_srq(device->pd, &srq_init_attr);
3001 			if (!poller->srq) {
3002 				SPDK_ERRLOG("Unable to create shared receive queue, errno %d\n", errno);
3003 				spdk_nvmf_rdma_poll_group_destroy(&rgroup->group);
3004 				pthread_mutex_unlock(&rtransport->lock);
3005 				return NULL;
3006 			}
3007 
3008 			opts.qp = poller->srq;
3009 			opts.pd = device->pd;
3010 			opts.qpair = NULL;
3011 			opts.shared = true;
3012 			opts.max_queue_depth = poller->max_srq_depth;
3013 			opts.in_capsule_data_size = transport->opts.in_capsule_data_size;
3014 
3015 			poller->resources = nvmf_rdma_resources_create(&opts);
3016 			if (!poller->resources) {
3017 				SPDK_ERRLOG("Unable to allocate resources for shared receive queue.\n");
3018 				spdk_nvmf_rdma_poll_group_destroy(&rgroup->group);
3019 				pthread_mutex_unlock(&rtransport->lock);
3020 			}
3021 		}
3022 
3023 		/*
3024 		 * When using an srq, we can limit the completion queue at startup.
3025 		 * The following formula represents the calculation:
3026 		 * num_cqe = num_recv + num_data_wr + num_send_wr.
3027 		 * where num_recv=num_data_wr=and num_send_wr=poller->max_srq_depth
3028 		 */
3029 		if (poller->srq) {
3030 			num_cqe = poller->max_srq_depth * 3;
3031 		} else {
3032 			num_cqe = DEFAULT_NVMF_RDMA_CQ_SIZE;
3033 		}
3034 
3035 		poller->cq = ibv_create_cq(device->context, num_cqe, poller, NULL, 0);
3036 		if (!poller->cq) {
3037 			SPDK_ERRLOG("Unable to create completion queue\n");
3038 			spdk_nvmf_rdma_poll_group_destroy(&rgroup->group);
3039 			pthread_mutex_unlock(&rtransport->lock);
3040 			return NULL;
3041 		}
3042 		poller->num_cqe = num_cqe;
3043 	}
3044 
3045 	pthread_mutex_unlock(&rtransport->lock);
3046 	return &rgroup->group;
3047 }
3048 
3049 static void
3050 spdk_nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
3051 {
3052 	struct spdk_nvmf_rdma_poll_group	*rgroup;
3053 	struct spdk_nvmf_rdma_poller		*poller, *tmp;
3054 	struct spdk_nvmf_rdma_qpair		*qpair, *tmp_qpair;
3055 
3056 	rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);
3057 
3058 	if (!rgroup) {
3059 		return;
3060 	}
3061 
3062 	TAILQ_FOREACH_SAFE(poller, &rgroup->pollers, link, tmp) {
3063 		TAILQ_REMOVE(&rgroup->pollers, poller, link);
3064 
3065 		TAILQ_FOREACH_SAFE(qpair, &poller->qpairs, link, tmp_qpair) {
3066 			spdk_nvmf_rdma_qpair_destroy(qpair);
3067 		}
3068 
3069 		if (poller->srq) {
3070 			nvmf_rdma_resources_destroy(poller->resources);
3071 			ibv_destroy_srq(poller->srq);
3072 			SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Destroyed RDMA shared queue %p\n", poller->srq);
3073 		}
3074 
3075 		if (poller->cq) {
3076 			ibv_destroy_cq(poller->cq);
3077 		}
3078 
3079 		free(poller);
3080 	}
3081 
3082 	if (!STAILQ_EMPTY(&rgroup->pending_data_buf_queue)) {
3083 		SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
3084 	}
3085 
3086 	free(rgroup);
3087 }
3088 
3089 static void
3090 spdk_nvmf_rdma_qpair_reject_connection(struct spdk_nvmf_rdma_qpair *rqpair)
3091 {
3092 	if (rqpair->cm_id != NULL) {
3093 		spdk_nvmf_rdma_event_reject(rqpair->cm_id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES);
3094 	}
3095 	spdk_nvmf_rdma_qpair_destroy(rqpair);
3096 }
3097 
3098 static int
3099 spdk_nvmf_rdma_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
3100 			      struct spdk_nvmf_qpair *qpair)
3101 {
3102 	struct spdk_nvmf_rdma_poll_group	*rgroup;
3103 	struct spdk_nvmf_rdma_qpair		*rqpair;
3104 	struct spdk_nvmf_rdma_device		*device;
3105 	struct spdk_nvmf_rdma_poller		*poller;
3106 	int					rc;
3107 
3108 	rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);
3109 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
3110 
3111 	device = rqpair->port->device;
3112 
3113 	TAILQ_FOREACH(poller, &rgroup->pollers, link) {
3114 		if (poller->device == device) {
3115 			break;
3116 		}
3117 	}
3118 
3119 	if (!poller) {
3120 		SPDK_ERRLOG("No poller found for device.\n");
3121 		return -1;
3122 	}
3123 
3124 	TAILQ_INSERT_TAIL(&poller->qpairs, rqpair, link);
3125 	rqpair->poller = poller;
3126 	rqpair->srq = rqpair->poller->srq;
3127 
3128 	rc = spdk_nvmf_rdma_qpair_initialize(qpair);
3129 	if (rc < 0) {
3130 		SPDK_ERRLOG("Failed to initialize nvmf_rdma_qpair with qpair=%p\n", qpair);
3131 		return -1;
3132 	}
3133 
3134 	rc = spdk_nvmf_rdma_event_accept(rqpair->cm_id, rqpair);
3135 	if (rc) {
3136 		/* Try to reject, but we probably can't */
3137 		spdk_nvmf_rdma_qpair_reject_connection(rqpair);
3138 		return -1;
3139 	}
3140 
3141 	spdk_nvmf_rdma_update_ibv_state(rqpair);
3142 
3143 	return 0;
3144 }
3145 
3146 static int
3147 spdk_nvmf_rdma_request_free(struct spdk_nvmf_request *req)
3148 {
3149 	struct spdk_nvmf_rdma_request	*rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
3150 	struct spdk_nvmf_rdma_transport	*rtransport = SPDK_CONTAINEROF(req->qpair->transport,
3151 			struct spdk_nvmf_rdma_transport, transport);
3152 
3153 	nvmf_rdma_request_free(rdma_req, rtransport);
3154 	return 0;
3155 }
3156 
3157 static int
3158 spdk_nvmf_rdma_request_complete(struct spdk_nvmf_request *req)
3159 {
3160 	struct spdk_nvmf_rdma_transport	*rtransport = SPDK_CONTAINEROF(req->qpair->transport,
3161 			struct spdk_nvmf_rdma_transport, transport);
3162 	struct spdk_nvmf_rdma_request	*rdma_req = SPDK_CONTAINEROF(req,
3163 			struct spdk_nvmf_rdma_request, req);
3164 	struct spdk_nvmf_rdma_qpair     *rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair,
3165 			struct spdk_nvmf_rdma_qpair, qpair);
3166 
3167 	if (rqpair->ibv_state != IBV_QPS_ERR) {
3168 		/* The connection is alive, so process the request as normal */
3169 		rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
3170 	} else {
3171 		/* The connection is dead. Move the request directly to the completed state. */
3172 		rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
3173 	}
3174 
3175 	spdk_nvmf_rdma_request_process(rtransport, rdma_req);
3176 
3177 	return 0;
3178 }
3179 
3180 static int
3181 spdk_nvmf_rdma_destroy_defunct_qpair(void *ctx)
3182 {
3183 	struct spdk_nvmf_rdma_qpair	*rqpair = ctx;
3184 	struct spdk_nvmf_rdma_transport *rtransport = SPDK_CONTAINEROF(rqpair->qpair.transport,
3185 			struct spdk_nvmf_rdma_transport, transport);
3186 
3187 	spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, true);
3188 	spdk_nvmf_rdma_qpair_destroy(rqpair);
3189 
3190 	return 0;
3191 }
3192 
3193 static void
3194 spdk_nvmf_rdma_close_qpair(struct spdk_nvmf_qpair *qpair)
3195 {
3196 	struct spdk_nvmf_rdma_qpair *rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
3197 
3198 	if (rqpair->disconnect_flags & RDMA_QP_DISCONNECTING) {
3199 		return;
3200 	}
3201 
3202 	rqpair->disconnect_flags |= RDMA_QP_DISCONNECTING;
3203 
3204 	/* This happens only when the qpair is disconnected before
3205 	 * it is added to the poll group. Since there is no poll group,
3206 	 * the RDMA qp has not been initialized yet and the RDMA CM
3207 	 * event has not yet been acknowledged, so we need to reject it.
3208 	 */
3209 	if (rqpair->qpair.state == SPDK_NVMF_QPAIR_UNINITIALIZED) {
3210 		spdk_nvmf_rdma_qpair_reject_connection(rqpair);
3211 		return;
3212 	}
3213 
3214 	if (rqpair->ibv_state != IBV_QPS_ERR) {
3215 		spdk_nvmf_rdma_set_ibv_state(rqpair, IBV_QPS_ERR);
3216 	}
3217 
3218 	rqpair->destruct_poller = spdk_poller_register(spdk_nvmf_rdma_destroy_defunct_qpair, (void *)rqpair,
3219 				  NVMF_RDMA_QPAIR_DESTROY_TIMEOUT_US);
3220 }
3221 
3222 static struct spdk_nvmf_rdma_qpair *
3223 get_rdma_qpair_from_wc(struct spdk_nvmf_rdma_poller *rpoller, struct ibv_wc *wc)
3224 {
3225 	struct spdk_nvmf_rdma_qpair *rqpair;
3226 	/* @todo: improve QP search */
3227 	TAILQ_FOREACH(rqpair, &rpoller->qpairs, link) {
3228 		if (wc->qp_num == rqpair->cm_id->qp->qp_num) {
3229 			return rqpair;
3230 		}
3231 	}
3232 	SPDK_ERRLOG("Didn't find QP with qp_num %u\n", wc->qp_num);
3233 	return NULL;
3234 }
3235 
3236 #ifdef DEBUG
3237 static int
3238 spdk_nvmf_rdma_req_is_completing(struct spdk_nvmf_rdma_request *rdma_req)
3239 {
3240 	return rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST ||
3241 	       rdma_req->state == RDMA_REQUEST_STATE_COMPLETING;
3242 }
3243 #endif
3244 
3245 static void
3246 _poller_reset_failed_recvs(struct spdk_nvmf_rdma_poller *rpoller, struct ibv_recv_wr *bad_recv_wr,
3247 			   int rc)
3248 {
3249 	struct spdk_nvmf_rdma_recv	*rdma_recv;
3250 	struct spdk_nvmf_rdma_wr	*bad_rdma_wr;
3251 
3252 	SPDK_ERRLOG("Failed to post a recv for the poller %p with errno %d\n", rpoller, -rc);
3253 	while (bad_recv_wr != NULL) {
3254 		bad_rdma_wr = (struct spdk_nvmf_rdma_wr *)bad_recv_wr->wr_id;
3255 		rdma_recv = SPDK_CONTAINEROF(bad_rdma_wr, struct spdk_nvmf_rdma_recv, rdma_wr);
3256 
3257 		rdma_recv->qpair->current_recv_depth++;
3258 		bad_recv_wr = bad_recv_wr->next;
3259 		SPDK_ERRLOG("Failed to post a recv for the qpair %p with errno %d\n", rdma_recv->qpair, -rc);
3260 		spdk_nvmf_rdma_start_disconnect(rdma_recv->qpair);
3261 	}
3262 }
3263 
3264 static void
3265 _qp_reset_failed_recvs(struct spdk_nvmf_rdma_qpair *rqpair, struct ibv_recv_wr *bad_recv_wr, int rc)
3266 {
3267 	SPDK_ERRLOG("Failed to post a recv for the qpair %p with errno %d\n", rqpair, -rc);
3268 	while (bad_recv_wr != NULL) {
3269 		bad_recv_wr = bad_recv_wr->next;
3270 		rqpair->current_recv_depth++;
3271 	}
3272 	spdk_nvmf_rdma_start_disconnect(rqpair);
3273 }
3274 
3275 static void
3276 _poller_submit_recvs(struct spdk_nvmf_rdma_transport *rtransport,
3277 		     struct spdk_nvmf_rdma_poller *rpoller)
3278 {
3279 	struct spdk_nvmf_rdma_qpair	*rqpair;
3280 	struct ibv_recv_wr		*bad_recv_wr;
3281 	int				rc;
3282 
3283 	if (rpoller->srq) {
3284 		if (rpoller->resources->recvs_to_post.first != NULL) {
3285 			rc = ibv_post_srq_recv(rpoller->srq, rpoller->resources->recvs_to_post.first, &bad_recv_wr);
3286 			if (rc) {
3287 				_poller_reset_failed_recvs(rpoller, bad_recv_wr, rc);
3288 			}
3289 			rpoller->resources->recvs_to_post.first = NULL;
3290 			rpoller->resources->recvs_to_post.last = NULL;
3291 		}
3292 	} else {
3293 		while (!STAILQ_EMPTY(&rpoller->qpairs_pending_recv)) {
3294 			rqpair = STAILQ_FIRST(&rpoller->qpairs_pending_recv);
3295 			assert(rqpair->resources->recvs_to_post.first != NULL);
3296 			rc = ibv_post_recv(rqpair->cm_id->qp, rqpair->resources->recvs_to_post.first, &bad_recv_wr);
3297 			if (rc) {
3298 				_qp_reset_failed_recvs(rqpair, bad_recv_wr, rc);
3299 			}
3300 			rqpair->resources->recvs_to_post.first = NULL;
3301 			rqpair->resources->recvs_to_post.last = NULL;
3302 			STAILQ_REMOVE_HEAD(&rpoller->qpairs_pending_recv, recv_link);
3303 		}
3304 	}
3305 }
3306 
3307 static void
3308 _qp_reset_failed_sends(struct spdk_nvmf_rdma_transport *rtransport,
3309 		       struct spdk_nvmf_rdma_qpair *rqpair, struct ibv_send_wr *bad_wr, int rc)
3310 {
3311 	struct spdk_nvmf_rdma_wr	*bad_rdma_wr;
3312 	struct spdk_nvmf_rdma_request	*prev_rdma_req = NULL, *cur_rdma_req = NULL;
3313 
3314 	SPDK_ERRLOG("Failed to post a send for the qpair %p with errno %d\n", rqpair, -rc);
3315 	for (; bad_wr != NULL; bad_wr = bad_wr->next) {
3316 		bad_rdma_wr = (struct spdk_nvmf_rdma_wr *)bad_wr->wr_id;
3317 		assert(rqpair->current_send_depth > 0);
3318 		rqpair->current_send_depth--;
3319 		switch (bad_rdma_wr->type) {
3320 		case RDMA_WR_TYPE_DATA:
3321 			cur_rdma_req = SPDK_CONTAINEROF(bad_rdma_wr, struct spdk_nvmf_rdma_request, data.rdma_wr);
3322 			if (bad_wr->opcode == IBV_WR_RDMA_READ) {
3323 				assert(rqpair->current_read_depth > 0);
3324 				rqpair->current_read_depth--;
3325 			}
3326 			break;
3327 		case RDMA_WR_TYPE_SEND:
3328 			cur_rdma_req = SPDK_CONTAINEROF(bad_rdma_wr, struct spdk_nvmf_rdma_request, rsp.rdma_wr);
3329 			break;
3330 		default:
3331 			SPDK_ERRLOG("Found a RECV in the list of pending SEND requests for qpair %p\n", rqpair);
3332 			prev_rdma_req = cur_rdma_req;
3333 			continue;
3334 		}
3335 
3336 		if (prev_rdma_req == cur_rdma_req) {
3337 			/* this request was handled by an earlier wr. i.e. we were performing an nvme read. */
3338 			/* We only have to check against prev_wr since each requests wrs are contiguous in this list. */
3339 			continue;
3340 		}
3341 
3342 		switch (cur_rdma_req->state) {
3343 		case RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER:
3344 			cur_rdma_req->req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
3345 			cur_rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
3346 			break;
3347 		case RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST:
3348 		case RDMA_REQUEST_STATE_COMPLETING:
3349 			cur_rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
3350 			break;
3351 		default:
3352 			SPDK_ERRLOG("Found a request in a bad state %d when draining pending SEND requests for qpair %p\n",
3353 				    cur_rdma_req->state, rqpair);
3354 			continue;
3355 		}
3356 
3357 		spdk_nvmf_rdma_request_process(rtransport, cur_rdma_req);
3358 		prev_rdma_req = cur_rdma_req;
3359 	}
3360 
3361 	if (rqpair->qpair.state == SPDK_NVMF_QPAIR_ACTIVE) {
3362 		/* Disconnect the connection. */
3363 		spdk_nvmf_rdma_start_disconnect(rqpair);
3364 	}
3365 
3366 }
3367 
3368 static void
3369 _poller_submit_sends(struct spdk_nvmf_rdma_transport *rtransport,
3370 		     struct spdk_nvmf_rdma_poller *rpoller)
3371 {
3372 	struct spdk_nvmf_rdma_qpair	*rqpair;
3373 	struct ibv_send_wr		*bad_wr = NULL;
3374 	int				rc;
3375 
3376 	while (!STAILQ_EMPTY(&rpoller->qpairs_pending_send)) {
3377 		rqpair = STAILQ_FIRST(&rpoller->qpairs_pending_send);
3378 		assert(rqpair->sends_to_post.first != NULL);
3379 		rc = ibv_post_send(rqpair->cm_id->qp, rqpair->sends_to_post.first, &bad_wr);
3380 
3381 		/* bad wr always points to the first wr that failed. */
3382 		if (rc) {
3383 			_qp_reset_failed_sends(rtransport, rqpair, bad_wr, rc);
3384 		}
3385 		rqpair->sends_to_post.first = NULL;
3386 		rqpair->sends_to_post.last = NULL;
3387 		STAILQ_REMOVE_HEAD(&rpoller->qpairs_pending_send, send_link);
3388 	}
3389 }
3390 
3391 static int
3392 spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
3393 			   struct spdk_nvmf_rdma_poller *rpoller)
3394 {
3395 	struct ibv_wc wc[32];
3396 	struct spdk_nvmf_rdma_wr	*rdma_wr;
3397 	struct spdk_nvmf_rdma_request	*rdma_req;
3398 	struct spdk_nvmf_rdma_recv	*rdma_recv;
3399 	struct spdk_nvmf_rdma_qpair	*rqpair;
3400 	int reaped, i;
3401 	int count = 0;
3402 	bool error = false;
3403 
3404 	/* Poll for completing operations. */
3405 	reaped = ibv_poll_cq(rpoller->cq, 32, wc);
3406 	if (reaped < 0) {
3407 		SPDK_ERRLOG("Error polling CQ! (%d): %s\n",
3408 			    errno, spdk_strerror(errno));
3409 		return -1;
3410 	}
3411 
3412 	for (i = 0; i < reaped; i++) {
3413 
3414 		rdma_wr = (struct spdk_nvmf_rdma_wr *)wc[i].wr_id;
3415 
3416 		switch (rdma_wr->type) {
3417 		case RDMA_WR_TYPE_SEND:
3418 			rdma_req = SPDK_CONTAINEROF(rdma_wr, struct spdk_nvmf_rdma_request, rsp.rdma_wr);
3419 			rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
3420 
3421 			if (!wc[i].status) {
3422 				count++;
3423 				assert(wc[i].opcode == IBV_WC_SEND);
3424 				assert(spdk_nvmf_rdma_req_is_completing(rdma_req));
3425 			} else {
3426 				SPDK_ERRLOG("data=%p length=%u\n", rdma_req->req.data, rdma_req->req.length);
3427 			}
3428 
3429 			rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
3430 			rqpair->current_send_depth--;
3431 
3432 			spdk_nvmf_rdma_request_process(rtransport, rdma_req);
3433 			assert(rdma_req->num_outstanding_data_wr == 0);
3434 			break;
3435 		case RDMA_WR_TYPE_RECV:
3436 			/* rdma_recv->qpair will be invalid if using an SRQ.  In that case we have to get the qpair from the wc. */
3437 			rdma_recv = SPDK_CONTAINEROF(rdma_wr, struct spdk_nvmf_rdma_recv, rdma_wr);
3438 			if (rpoller->srq != NULL) {
3439 				rdma_recv->qpair = get_rdma_qpair_from_wc(rpoller, &wc[i]);
3440 			}
3441 			rqpair = rdma_recv->qpair;
3442 
3443 			assert(rqpair != NULL);
3444 			if (!wc[i].status) {
3445 				assert(wc[i].opcode == IBV_WC_RECV);
3446 				if (rqpair->current_recv_depth >= rqpair->max_queue_depth) {
3447 					spdk_nvmf_rdma_start_disconnect(rqpair);
3448 					break;
3449 				}
3450 			}
3451 
3452 			rdma_recv->wr.next = NULL;
3453 			rqpair->current_recv_depth++;
3454 			STAILQ_INSERT_TAIL(&rqpair->resources->incoming_queue, rdma_recv, link);
3455 			break;
3456 		case RDMA_WR_TYPE_DATA:
3457 			rdma_req = SPDK_CONTAINEROF(rdma_wr, struct spdk_nvmf_rdma_request, data.rdma_wr);
3458 			rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
3459 
3460 			assert(rdma_req->num_outstanding_data_wr > 0);
3461 
3462 			rqpair->current_send_depth--;
3463 			rdma_req->num_outstanding_data_wr--;
3464 			if (!wc[i].status) {
3465 				if (wc[i].opcode == IBV_WC_RDMA_READ) {
3466 					rqpair->current_read_depth--;
3467 					/* wait for all outstanding reads associated with the same rdma_req to complete before proceeding. */
3468 					if (rdma_req->num_outstanding_data_wr == 0) {
3469 						rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
3470 						spdk_nvmf_rdma_request_process(rtransport, rdma_req);
3471 					}
3472 				} else {
3473 					assert(wc[i].opcode == IBV_WC_RDMA_WRITE);
3474 				}
3475 			} else {
3476 				/* If the data transfer fails still force the queue into the error state,
3477 				 * if we were performing an RDMA_READ, we need to force the request into a
3478 				 * completed state since it wasn't linked to a send. However, in the RDMA_WRITE
3479 				 * case, we should wait for the SEND to complete. */
3480 				SPDK_ERRLOG("data=%p length=%u\n", rdma_req->req.data, rdma_req->req.length);
3481 				if (rdma_req->data.wr.opcode == IBV_WR_RDMA_READ) {
3482 					rqpair->current_read_depth--;
3483 					if (rdma_req->num_outstanding_data_wr == 0) {
3484 						rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
3485 					}
3486 				}
3487 			}
3488 			break;
3489 		default:
3490 			SPDK_ERRLOG("Received an unknown opcode on the CQ: %d\n", wc[i].opcode);
3491 			continue;
3492 		}
3493 
3494 		/* Handle error conditions */
3495 		if (wc[i].status) {
3496 			SPDK_DEBUGLOG(SPDK_LOG_RDMA, "CQ error on CQ %p, Request 0x%lu (%d): %s\n",
3497 				      rpoller->cq, wc[i].wr_id, wc[i].status, ibv_wc_status_str(wc[i].status));
3498 
3499 			error = true;
3500 
3501 			if (rqpair->qpair.state == SPDK_NVMF_QPAIR_ACTIVE) {
3502 				/* Disconnect the connection. */
3503 				spdk_nvmf_rdma_start_disconnect(rqpair);
3504 			} else {
3505 				nvmf_rdma_destroy_drained_qpair(rqpair);
3506 			}
3507 			continue;
3508 		}
3509 
3510 		spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, false);
3511 
3512 		if (rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
3513 			nvmf_rdma_destroy_drained_qpair(rqpair);
3514 		}
3515 	}
3516 
3517 	if (error == true) {
3518 		return -1;
3519 	}
3520 
3521 	/* submit outstanding work requests. */
3522 	_poller_submit_recvs(rtransport, rpoller);
3523 	_poller_submit_sends(rtransport, rpoller);
3524 
3525 	return count;
3526 }
3527 
3528 static int
3529 spdk_nvmf_rdma_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
3530 {
3531 	struct spdk_nvmf_rdma_transport *rtransport;
3532 	struct spdk_nvmf_rdma_poll_group *rgroup;
3533 	struct spdk_nvmf_rdma_poller	*rpoller;
3534 	int				count, rc;
3535 
3536 	rtransport = SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_rdma_transport, transport);
3537 	rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);
3538 
3539 	count = 0;
3540 	TAILQ_FOREACH(rpoller, &rgroup->pollers, link) {
3541 		rc = spdk_nvmf_rdma_poller_poll(rtransport, rpoller);
3542 		if (rc < 0) {
3543 			return rc;
3544 		}
3545 		count += rc;
3546 	}
3547 
3548 	return count;
3549 }
3550 
3551 static int
3552 spdk_nvmf_rdma_trid_from_cm_id(struct rdma_cm_id *id,
3553 			       struct spdk_nvme_transport_id *trid,
3554 			       bool peer)
3555 {
3556 	struct sockaddr *saddr;
3557 	uint16_t port;
3558 
3559 	trid->trtype = SPDK_NVME_TRANSPORT_RDMA;
3560 
3561 	if (peer) {
3562 		saddr = rdma_get_peer_addr(id);
3563 	} else {
3564 		saddr = rdma_get_local_addr(id);
3565 	}
3566 	switch (saddr->sa_family) {
3567 	case AF_INET: {
3568 		struct sockaddr_in *saddr_in = (struct sockaddr_in *)saddr;
3569 
3570 		trid->adrfam = SPDK_NVMF_ADRFAM_IPV4;
3571 		inet_ntop(AF_INET, &saddr_in->sin_addr,
3572 			  trid->traddr, sizeof(trid->traddr));
3573 		if (peer) {
3574 			port = ntohs(rdma_get_dst_port(id));
3575 		} else {
3576 			port = ntohs(rdma_get_src_port(id));
3577 		}
3578 		snprintf(trid->trsvcid, sizeof(trid->trsvcid), "%u", port);
3579 		break;
3580 	}
3581 	case AF_INET6: {
3582 		struct sockaddr_in6 *saddr_in = (struct sockaddr_in6 *)saddr;
3583 		trid->adrfam = SPDK_NVMF_ADRFAM_IPV6;
3584 		inet_ntop(AF_INET6, &saddr_in->sin6_addr,
3585 			  trid->traddr, sizeof(trid->traddr));
3586 		if (peer) {
3587 			port = ntohs(rdma_get_dst_port(id));
3588 		} else {
3589 			port = ntohs(rdma_get_src_port(id));
3590 		}
3591 		snprintf(trid->trsvcid, sizeof(trid->trsvcid), "%u", port);
3592 		break;
3593 	}
3594 	default:
3595 		return -1;
3596 
3597 	}
3598 
3599 	return 0;
3600 }
3601 
3602 static int
3603 spdk_nvmf_rdma_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
3604 				   struct spdk_nvme_transport_id *trid)
3605 {
3606 	struct spdk_nvmf_rdma_qpair	*rqpair;
3607 
3608 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
3609 
3610 	return spdk_nvmf_rdma_trid_from_cm_id(rqpair->cm_id, trid, true);
3611 }
3612 
3613 static int
3614 spdk_nvmf_rdma_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
3615 				    struct spdk_nvme_transport_id *trid)
3616 {
3617 	struct spdk_nvmf_rdma_qpair	*rqpair;
3618 
3619 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
3620 
3621 	return spdk_nvmf_rdma_trid_from_cm_id(rqpair->cm_id, trid, false);
3622 }
3623 
3624 static int
3625 spdk_nvmf_rdma_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
3626 				     struct spdk_nvme_transport_id *trid)
3627 {
3628 	struct spdk_nvmf_rdma_qpair	*rqpair;
3629 
3630 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
3631 
3632 	return spdk_nvmf_rdma_trid_from_cm_id(rqpair->listen_id, trid, false);
3633 }
3634 
3635 void
3636 spdk_nvmf_rdma_init_hooks(struct spdk_nvme_rdma_hooks *hooks)
3637 {
3638 	g_nvmf_hooks = *hooks;
3639 }
3640 
3641 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
3642 	.type = SPDK_NVME_TRANSPORT_RDMA,
3643 	.opts_init = spdk_nvmf_rdma_opts_init,
3644 	.create = spdk_nvmf_rdma_create,
3645 	.destroy = spdk_nvmf_rdma_destroy,
3646 
3647 	.listen = spdk_nvmf_rdma_listen,
3648 	.stop_listen = spdk_nvmf_rdma_stop_listen,
3649 	.accept = spdk_nvmf_rdma_accept,
3650 
3651 	.listener_discover = spdk_nvmf_rdma_discover,
3652 
3653 	.poll_group_create = spdk_nvmf_rdma_poll_group_create,
3654 	.poll_group_destroy = spdk_nvmf_rdma_poll_group_destroy,
3655 	.poll_group_add = spdk_nvmf_rdma_poll_group_add,
3656 	.poll_group_poll = spdk_nvmf_rdma_poll_group_poll,
3657 
3658 	.req_free = spdk_nvmf_rdma_request_free,
3659 	.req_complete = spdk_nvmf_rdma_request_complete,
3660 
3661 	.qpair_fini = spdk_nvmf_rdma_close_qpair,
3662 	.qpair_get_peer_trid = spdk_nvmf_rdma_qpair_get_peer_trid,
3663 	.qpair_get_local_trid = spdk_nvmf_rdma_qpair_get_local_trid,
3664 	.qpair_get_listen_trid = spdk_nvmf_rdma_qpair_get_listen_trid,
3665 
3666 };
3667 
3668 SPDK_LOG_REGISTER_COMPONENT("rdma", SPDK_LOG_RDMA)
3669