xref: /spdk/lib/nvmf/rdma.c (revision fecffda6ecf8853b82edccde429b68252f0a62c5)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2016 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2019-2021 Mellanox Technologies LTD. All rights reserved.
4  *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk/config.h"
10 #include "spdk/thread.h"
11 #include "spdk/likely.h"
12 #include "spdk/nvmf_transport.h"
13 #include "spdk/string.h"
14 #include "spdk/trace.h"
15 #include "spdk/tree.h"
16 #include "spdk/util.h"
17 
18 #include "spdk_internal/assert.h"
19 #include "spdk/log.h"
20 #include "spdk_internal/rdma.h"
21 
22 #include "nvmf_internal.h"
23 #include "transport.h"
24 
25 #include "spdk_internal/trace_defs.h"
26 
27 struct spdk_nvme_rdma_hooks g_nvmf_hooks = {};
28 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma;
29 
30 /*
31  RDMA Connection Resource Defaults
32  */
33 #define NVMF_DEFAULT_TX_SGE		SPDK_NVMF_MAX_SGL_ENTRIES
34 #define NVMF_DEFAULT_RSP_SGE		1
35 #define NVMF_DEFAULT_RX_SGE		2
36 
37 /* The RDMA completion queue size */
38 #define DEFAULT_NVMF_RDMA_CQ_SIZE	4096
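/* Rough per-qpair work request budget used to size the completion queue: on the order of
 * one RECV, one RDMA data transfer, and one SEND completion per request, plus a little slack.
 */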
39 #define MAX_WR_PER_QP(queue_depth)	((queue_depth) * 3 + 2)
40 
41 static int g_spdk_nvmf_ibv_query_mask =
42 	IBV_QP_STATE |
43 	IBV_QP_PKEY_INDEX |
44 	IBV_QP_PORT |
45 	IBV_QP_ACCESS_FLAGS |
46 	IBV_QP_AV |
47 	IBV_QP_PATH_MTU |
48 	IBV_QP_DEST_QPN |
49 	IBV_QP_RQ_PSN |
50 	IBV_QP_MAX_DEST_RD_ATOMIC |
51 	IBV_QP_MIN_RNR_TIMER |
52 	IBV_QP_SQ_PSN |
53 	IBV_QP_TIMEOUT |
54 	IBV_QP_RETRY_CNT |
55 	IBV_QP_RNR_RETRY |
56 	IBV_QP_MAX_QP_RD_ATOMIC;
57 
58 enum spdk_nvmf_rdma_request_state {
59 	/* The request is not currently in use */
60 	RDMA_REQUEST_STATE_FREE = 0,
61 
62 	/* Initial state when request first received */
63 	RDMA_REQUEST_STATE_NEW,
64 
65 	/* The request is queued until a data buffer is available. */
66 	RDMA_REQUEST_STATE_NEED_BUFFER,
67 
68 	/* The request is waiting on RDMA queue depth availability
69 	 * to transfer data from the host to the controller.
70 	 */
71 	RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING,
72 
73 	/* The request is currently transferring data from the host to the controller. */
74 	RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER,
75 
76 	/* The request is ready to execute at the block device */
77 	RDMA_REQUEST_STATE_READY_TO_EXECUTE,
78 
79 	/* The request is currently executing at the block device */
80 	RDMA_REQUEST_STATE_EXECUTING,
81 
82 	/* The request finished executing at the block device */
83 	RDMA_REQUEST_STATE_EXECUTED,
84 
85 	/* The request is waiting on RDMA queue depth availability
86 	 * to transfer data from the controller to the host.
87 	 */
88 	RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING,
89 
90 	/* The request is ready to send a completion */
91 	RDMA_REQUEST_STATE_READY_TO_COMPLETE,
92 
93 	/* The request is currently transferring data from the controller to the host. */
94 	RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST,
95 
96 	/* The request currently has an outstanding completion without an
97 	 * associated data transfer.
98 	 */
99 	RDMA_REQUEST_STATE_COMPLETING,
100 
101 	/* The request completed and can be marked free. */
102 	RDMA_REQUEST_STATE_COMPLETED,
103 
104 	/* Terminator */
105 	RDMA_REQUEST_NUM_STATES,
106 };
107 
108 SPDK_TRACE_REGISTER_FN(nvmf_trace, "nvmf_rdma", TRACE_GROUP_NVMF_RDMA)
109 {
110 	spdk_trace_register_object(OBJECT_NVMF_RDMA_IO, 'r');
111 	spdk_trace_register_description("RDMA_REQ_NEW", TRACE_RDMA_REQUEST_STATE_NEW,
112 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 1,
113 					SPDK_TRACE_ARG_TYPE_PTR, "qpair");
114 	spdk_trace_register_description("RDMA_REQ_NEED_BUFFER", TRACE_RDMA_REQUEST_STATE_NEED_BUFFER,
115 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0,
116 					SPDK_TRACE_ARG_TYPE_PTR, "qpair");
117 	spdk_trace_register_description("RDMA_REQ_TX_PENDING_C2H",
118 					TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING,
119 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0,
120 					SPDK_TRACE_ARG_TYPE_PTR, "qpair");
121 	spdk_trace_register_description("RDMA_REQ_TX_PENDING_H2C",
122 					TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING,
123 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0,
124 					SPDK_TRACE_ARG_TYPE_PTR, "qpair");
125 	spdk_trace_register_description("RDMA_REQ_TX_H2C",
126 					TRACE_RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER,
127 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0,
128 					SPDK_TRACE_ARG_TYPE_PTR, "qpair");
129 	spdk_trace_register_description("RDMA_REQ_RDY_TO_EXECUTE",
130 					TRACE_RDMA_REQUEST_STATE_READY_TO_EXECUTE,
131 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0,
132 					SPDK_TRACE_ARG_TYPE_PTR, "qpair");
133 	spdk_trace_register_description("RDMA_REQ_EXECUTING",
134 					TRACE_RDMA_REQUEST_STATE_EXECUTING,
135 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0,
136 					SPDK_TRACE_ARG_TYPE_PTR, "qpair");
137 	spdk_trace_register_description("RDMA_REQ_EXECUTED",
138 					TRACE_RDMA_REQUEST_STATE_EXECUTED,
139 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0,
140 					SPDK_TRACE_ARG_TYPE_PTR, "qpair");
141 	spdk_trace_register_description("RDMA_REQ_RDY_TO_COMPL",
142 					TRACE_RDMA_REQUEST_STATE_READY_TO_COMPLETE,
143 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0,
144 					SPDK_TRACE_ARG_TYPE_PTR, "qpair");
145 	spdk_trace_register_description("RDMA_REQ_COMPLETING_C2H",
146 					TRACE_RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST,
147 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0,
148 					SPDK_TRACE_ARG_TYPE_PTR, "qpair");
149 	spdk_trace_register_description("RDMA_REQ_COMPLETING",
150 					TRACE_RDMA_REQUEST_STATE_COMPLETING,
151 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0,
152 					SPDK_TRACE_ARG_TYPE_PTR, "qpair");
153 	spdk_trace_register_description("RDMA_REQ_COMPLETED",
154 					TRACE_RDMA_REQUEST_STATE_COMPLETED,
155 					OWNER_NONE, OBJECT_NVMF_RDMA_IO, 0,
156 					SPDK_TRACE_ARG_TYPE_PTR, "qpair");
157 
158 	spdk_trace_register_description("RDMA_QP_CREATE", TRACE_RDMA_QP_CREATE,
159 					OWNER_NONE, OBJECT_NONE, 0,
160 					SPDK_TRACE_ARG_TYPE_INT, "");
161 	spdk_trace_register_description("RDMA_IBV_ASYNC_EVENT", TRACE_RDMA_IBV_ASYNC_EVENT,
162 					OWNER_NONE, OBJECT_NONE, 0,
163 					SPDK_TRACE_ARG_TYPE_INT, "type");
164 	spdk_trace_register_description("RDMA_CM_ASYNC_EVENT", TRACE_RDMA_CM_ASYNC_EVENT,
165 					OWNER_NONE, OBJECT_NONE, 0,
166 					SPDK_TRACE_ARG_TYPE_INT, "type");
167 	spdk_trace_register_description("RDMA_QP_STATE_CHANGE", TRACE_RDMA_QP_STATE_CHANGE,
168 					OWNER_NONE, OBJECT_NONE, 0,
169 					SPDK_TRACE_ARG_TYPE_PTR, "state");
170 	spdk_trace_register_description("RDMA_QP_DISCONNECT", TRACE_RDMA_QP_DISCONNECT,
171 					OWNER_NONE, OBJECT_NONE, 0,
172 					SPDK_TRACE_ARG_TYPE_INT, "");
173 	spdk_trace_register_description("RDMA_QP_DESTROY", TRACE_RDMA_QP_DESTROY,
174 					OWNER_NONE, OBJECT_NONE, 0,
175 					SPDK_TRACE_ARG_TYPE_INT, "");
176 }
177 
178 enum spdk_nvmf_rdma_wr_type {
179 	RDMA_WR_TYPE_RECV,
180 	RDMA_WR_TYPE_SEND,
181 	RDMA_WR_TYPE_DATA,
182 };
183 
184 struct spdk_nvmf_rdma_wr {
185 	enum spdk_nvmf_rdma_wr_type	type;
186 };
187 
188 /* This structure holds commands as they are received off the wire.
189  * It must be dynamically paired with a full request object
190  * (spdk_nvmf_rdma_request) to service a request. It is separate
191  * from the request because RDMA does not appear to order
192  * completions, so occasionally we'll get a new incoming
193  * command when there aren't any free request objects.
194  */
195 struct spdk_nvmf_rdma_recv {
196 	struct ibv_recv_wr			wr;
197 	struct ibv_sge				sgl[NVMF_DEFAULT_RX_SGE];
198 
199 	struct spdk_nvmf_rdma_qpair		*qpair;
200 
201 	/* In-capsule data buffer */
202 	uint8_t					*buf;
203 
204 	struct spdk_nvmf_rdma_wr		rdma_wr;
205 	uint64_t				receive_tsc;
206 
207 	STAILQ_ENTRY(spdk_nvmf_rdma_recv)	link;
208 };
209 
210 struct spdk_nvmf_rdma_request_data {
211 	struct spdk_nvmf_rdma_wr	rdma_wr;
212 	struct ibv_send_wr		wr;
213 	struct ibv_sge			sgl[SPDK_NVMF_MAX_SGL_ENTRIES];
214 };
215 
216 struct spdk_nvmf_rdma_request {
217 	struct spdk_nvmf_request		req;
218 
219 	enum spdk_nvmf_rdma_request_state	state;
220 
221 	/* Data offset in req.iov */
222 	uint32_t				offset;
223 
224 	struct spdk_nvmf_rdma_recv		*recv;
225 
226 	struct {
227 		struct spdk_nvmf_rdma_wr	rdma_wr;
228 		struct	ibv_send_wr		wr;
229 		struct	ibv_sge			sgl[NVMF_DEFAULT_RSP_SGE];
230 	} rsp;
231 
232 	struct spdk_nvmf_rdma_request_data	data;
233 
234 	uint32_t				iovpos;
235 
236 	uint32_t				num_outstanding_data_wr;
237 	uint64_t				receive_tsc;
238 
239 	bool					fused_failed;
240 	struct spdk_nvmf_rdma_request		*fused_pair;
241 
242 	STAILQ_ENTRY(spdk_nvmf_rdma_request)	state_link;
243 };
244 
245 struct spdk_nvmf_rdma_resource_opts {
246 	struct spdk_nvmf_rdma_qpair	*qpair;
247 	/* qp points either to a spdk_rdma_qp object or a spdk_rdma_srq object, depending on the value of shared. */
248 	void				*qp;
249 	struct spdk_rdma_mem_map	*map;
250 	uint32_t			max_queue_depth;
251 	uint32_t			in_capsule_data_size;
252 	bool				shared;
253 };
254 
255 struct spdk_nvmf_rdma_resources {
256 	/* Array of size "max_queue_depth" containing RDMA requests. */
257 	struct spdk_nvmf_rdma_request		*reqs;
258 
259 	/* Array of size "max_queue_depth" containing RDMA recvs. */
260 	struct spdk_nvmf_rdma_recv		*recvs;
261 
262 	/* Array of size "max_queue_depth" containing 64 byte capsules
263 	 * used for receive.
264 	 */
265 	union nvmf_h2c_msg			*cmds;
266 
267 	/* Array of size "max_queue_depth" containing 16 byte completions
268 	 * to be sent back to the user.
269 	 */
270 	union nvmf_c2h_msg			*cpls;
271 
272 	/* Array of size "max_queue_depth * InCapsuleDataSize" containing
273 	 * buffers to be used for in capsule data.
274 	 */
275 	void					*bufs;
276 
277 	/* Receives that are waiting for a request object */
278 	STAILQ_HEAD(, spdk_nvmf_rdma_recv)	incoming_queue;
279 
280 	/* Queue to track free requests */
281 	STAILQ_HEAD(, spdk_nvmf_rdma_request)	free_queue;
282 };
283 
284 typedef void (*spdk_nvmf_rdma_qpair_ibv_event)(struct spdk_nvmf_rdma_qpair *rqpair);
285 
286 struct spdk_nvmf_rdma_ibv_event_ctx {
287 	struct spdk_nvmf_rdma_qpair			*rqpair;
288 	spdk_nvmf_rdma_qpair_ibv_event			cb_fn;
289 	/* Link to other ibv events associated with this qpair */
290 	STAILQ_ENTRY(spdk_nvmf_rdma_ibv_event_ctx)	link;
291 };
292 
293 struct spdk_nvmf_rdma_qpair {
294 	struct spdk_nvmf_qpair			qpair;
295 
296 	struct spdk_nvmf_rdma_device		*device;
297 	struct spdk_nvmf_rdma_poller		*poller;
298 
299 	struct spdk_rdma_qp			*rdma_qp;
300 	struct rdma_cm_id			*cm_id;
301 	struct spdk_rdma_srq			*srq;
302 	struct rdma_cm_id			*listen_id;
303 
304 	/* Cache the QP number to speed up QP lookups in the RB tree. */
305 	uint32_t				qp_num;
306 
307 	/* The maximum number of I/O outstanding on this connection at one time */
308 	uint16_t				max_queue_depth;
309 
310 	/* The maximum number of active RDMA READ and ATOMIC operations at one time */
311 	uint16_t				max_read_depth;
312 
313 	/* The maximum number of RDMA SEND operations at one time */
314 	uint32_t				max_send_depth;
315 
316 	/* The current number of outstanding WRs from this qpair's
317 	 * recv queue. Should not exceed the qpair's max_queue_depth.
318 	 */
319 	uint16_t				current_recv_depth;
320 
321 	/* The current number of active RDMA READ operations */
322 	uint16_t				current_read_depth;
323 
324 	/* The current number of posted WRs from this qpair's
325 	 * send queue. Should not exceed max_send_depth.
326 	 */
327 	uint32_t				current_send_depth;
328 
329 	/* The maximum number of SGEs per WR on the send queue */
330 	uint32_t				max_send_sge;
331 
332 	/* The maximum number of SGEs per WR on the recv queue */
333 	uint32_t				max_recv_sge;
334 
335 	struct spdk_nvmf_rdma_resources		*resources;
336 
337 	STAILQ_HEAD(, spdk_nvmf_rdma_request)	pending_rdma_read_queue;
338 
339 	STAILQ_HEAD(, spdk_nvmf_rdma_request)	pending_rdma_write_queue;
340 
341 	/* Number of requests not in the free state */
342 	uint32_t				qd;
343 
344 	RB_ENTRY(spdk_nvmf_rdma_qpair)		node;
345 
346 	STAILQ_ENTRY(spdk_nvmf_rdma_qpair)	recv_link;
347 
348 	STAILQ_ENTRY(spdk_nvmf_rdma_qpair)	send_link;
349 
350 	/* IBV queue pair attributes: they are used to manage
351 	 * qp state and recover from errors.
352 	 */
353 	enum ibv_qp_state			ibv_state;
354 
355 	/* Points to a request that has fuse bits set to
356 	 * SPDK_NVME_CMD_FUSE_FIRST, when the qpair is waiting
357 	 * for the request that has SPDK_NVME_CMD_FUSE_SECOND.
358 	 */
359 	struct spdk_nvmf_rdma_request		*fused_first;
360 
361 	/*
362 	 * io_channel used to destroy the qpair when it is removed from the poll group
363 	 */
364 	struct spdk_io_channel		*destruct_channel;
365 
366 	/* List of ibv async events */
367 	STAILQ_HEAD(, spdk_nvmf_rdma_ibv_event_ctx)	ibv_events;
368 
369 	/* Lets us know that we have received the last_wqe event. */
370 	bool					last_wqe_reached;
371 
372 	/* Indicates that nvmf_rdma_close_qpair has been called */
373 	bool					to_close;
374 };
375 
376 struct spdk_nvmf_rdma_poller_stat {
377 	uint64_t				completions;
378 	uint64_t				polls;
379 	uint64_t				idle_polls;
380 	uint64_t				requests;
381 	uint64_t				request_latency;
382 	uint64_t				pending_free_request;
383 	uint64_t				pending_rdma_read;
384 	uint64_t				pending_rdma_write;
385 	struct spdk_rdma_qp_stats		qp_stats;
386 };
387 
388 struct spdk_nvmf_rdma_poller {
389 	struct spdk_nvmf_rdma_device		*device;
390 	struct spdk_nvmf_rdma_poll_group	*group;
391 
392 	int					num_cqe;
393 	int					required_num_wr;
394 	struct ibv_cq				*cq;
395 
396 	/* The maximum number of I/O outstanding on the shared receive queue at one time */
397 	uint16_t				max_srq_depth;
398 
399 	/* Shared receive queue */
400 	struct spdk_rdma_srq			*srq;
401 
402 	struct spdk_nvmf_rdma_resources		*resources;
403 	struct spdk_nvmf_rdma_poller_stat	stat;
404 
405 	RB_HEAD(qpairs_tree, spdk_nvmf_rdma_qpair) qpairs;
406 
407 	STAILQ_HEAD(, spdk_nvmf_rdma_qpair)	qpairs_pending_recv;
408 
409 	STAILQ_HEAD(, spdk_nvmf_rdma_qpair)	qpairs_pending_send;
410 
411 	TAILQ_ENTRY(spdk_nvmf_rdma_poller)	link;
412 };
413 
414 struct spdk_nvmf_rdma_poll_group_stat {
415 	uint64_t				pending_data_buffer;
416 };
417 
418 struct spdk_nvmf_rdma_poll_group {
419 	struct spdk_nvmf_transport_poll_group		group;
420 	struct spdk_nvmf_rdma_poll_group_stat		stat;
421 	TAILQ_HEAD(, spdk_nvmf_rdma_poller)		pollers;
422 	TAILQ_ENTRY(spdk_nvmf_rdma_poll_group)		link;
423 };
424 
425 struct spdk_nvmf_rdma_conn_sched {
426 	struct spdk_nvmf_rdma_poll_group *next_admin_pg;
427 	struct spdk_nvmf_rdma_poll_group *next_io_pg;
428 };
429 
430 /* Assuming rdma_cm uses just one protection domain per ibv_context. */
431 struct spdk_nvmf_rdma_device {
432 	struct ibv_device_attr			attr;
433 	struct ibv_context			*context;
434 
435 	struct spdk_rdma_mem_map		*map;
436 	struct ibv_pd				*pd;
437 
438 	int					num_srq;
439 
440 	TAILQ_ENTRY(spdk_nvmf_rdma_device)	link;
441 };
442 
443 struct spdk_nvmf_rdma_port {
444 	const struct spdk_nvme_transport_id	*trid;
445 	struct rdma_cm_id			*id;
446 	struct spdk_nvmf_rdma_device		*device;
447 	TAILQ_ENTRY(spdk_nvmf_rdma_port)	link;
448 };
449 
450 struct rdma_transport_opts {
451 	int		num_cqe;
452 	uint32_t	max_srq_depth;
453 	bool		no_srq;
454 	bool		no_wr_batching;
455 	int		acceptor_backlog;
456 };
457 
458 struct spdk_nvmf_rdma_transport {
459 	struct spdk_nvmf_transport	transport;
460 	struct rdma_transport_opts	rdma_opts;
461 
462 	struct spdk_nvmf_rdma_conn_sched conn_sched;
463 
464 	struct rdma_event_channel	*event_channel;
465 
466 	struct spdk_mempool		*data_wr_pool;
467 
468 	struct spdk_poller		*accept_poller;
469 
470 	/* fields used to poll RDMA/IB events */
471 	nfds_t			npoll_fds;
472 	struct pollfd		*poll_fds;
473 
474 	TAILQ_HEAD(, spdk_nvmf_rdma_device)	devices;
475 	TAILQ_HEAD(, spdk_nvmf_rdma_port)	ports;
476 	TAILQ_HEAD(, spdk_nvmf_rdma_poll_group)	poll_groups;
477 };
478 
479 static const struct spdk_json_object_decoder rdma_transport_opts_decoder[] = {
480 	{
481 		"num_cqe", offsetof(struct rdma_transport_opts, num_cqe),
482 		spdk_json_decode_int32, true
483 	},
484 	{
485 		"max_srq_depth", offsetof(struct rdma_transport_opts, max_srq_depth),
486 		spdk_json_decode_uint32, true
487 	},
488 	{
489 		"no_srq", offsetof(struct rdma_transport_opts, no_srq),
490 		spdk_json_decode_bool, true
491 	},
492 	{
493 		"no_wr_batching", offsetof(struct rdma_transport_opts, no_wr_batching),
494 		spdk_json_decode_bool, true
495 	},
496 	{
497 		"acceptor_backlog", offsetof(struct rdma_transport_opts, acceptor_backlog),
498 		spdk_json_decode_int32, true
499 	},
500 };
501 
502 static int
503 nvmf_rdma_qpair_compare(struct spdk_nvmf_rdma_qpair *rqpair1, struct spdk_nvmf_rdma_qpair *rqpair2)
504 {
505 	return rqpair1->qp_num < rqpair2->qp_num ? -1 : rqpair1->qp_num > rqpair2->qp_num;
506 }
507 
508 RB_GENERATE_STATIC(qpairs_tree, spdk_nvmf_rdma_qpair, node, nvmf_rdma_qpair_compare);
509 
510 static bool nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
511 				      struct spdk_nvmf_rdma_request *rdma_req);
512 
513 static void _poller_submit_sends(struct spdk_nvmf_rdma_transport *rtransport,
514 				 struct spdk_nvmf_rdma_poller *rpoller);
515 
516 static void _poller_submit_recvs(struct spdk_nvmf_rdma_transport *rtransport,
517 				 struct spdk_nvmf_rdma_poller *rpoller);
518 
519 static inline int
520 nvmf_rdma_check_ibv_state(enum ibv_qp_state state)
521 {
522 	switch (state) {
523 	case IBV_QPS_RESET:
524 	case IBV_QPS_INIT:
525 	case IBV_QPS_RTR:
526 	case IBV_QPS_RTS:
527 	case IBV_QPS_SQD:
528 	case IBV_QPS_SQE:
529 	case IBV_QPS_ERR:
530 		return 0;
531 	default:
532 		return -1;
533 	}
534 }
535 
536 static inline enum spdk_nvme_media_error_status_code
537 nvmf_rdma_dif_error_to_compl_status(uint8_t err_type) {
538 	enum spdk_nvme_media_error_status_code result;
539 	switch (err_type)
540 	{
541 	case SPDK_DIF_REFTAG_ERROR:
542 		result = SPDK_NVME_SC_REFERENCE_TAG_CHECK_ERROR;
543 		break;
544 	case SPDK_DIF_APPTAG_ERROR:
545 		result = SPDK_NVME_SC_APPLICATION_TAG_CHECK_ERROR;
546 		break;
547 	case SPDK_DIF_GUARD_ERROR:
548 		result = SPDK_NVME_SC_GUARD_CHECK_ERROR;
549 		break;
550 	default:
551 		SPDK_UNREACHABLE();
552 	}
553 
554 	return result;
555 }
556 
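/* Query the QP's current state from the verbs layer, cache it on the qpair, and emit a
 * trace record on state changes. Returns IBV_QPS_ERR + 1 if the query fails or the state
 * is not one we know how to handle.
 */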
557 static enum ibv_qp_state
558 nvmf_rdma_update_ibv_state(struct spdk_nvmf_rdma_qpair *rqpair) {
559 	enum ibv_qp_state old_state, new_state;
560 	struct ibv_qp_attr qp_attr;
561 	struct ibv_qp_init_attr init_attr;
562 	int rc;
563 
564 	old_state = rqpair->ibv_state;
565 	rc = ibv_query_qp(rqpair->rdma_qp->qp, &qp_attr,
566 			  g_spdk_nvmf_ibv_query_mask, &init_attr);
567 
568 	if (rc)
569 	{
570 		SPDK_ERRLOG("Failed to get updated RDMA queue pair state!\n");
571 		return IBV_QPS_ERR + 1;
572 	}
573 
574 	new_state = qp_attr.qp_state;
575 	rqpair->ibv_state = new_state;
576 	qp_attr.ah_attr.port_num = qp_attr.port_num;
577 
578 	rc = nvmf_rdma_check_ibv_state(new_state);
579 	if (rc)
580 	{
581 		SPDK_ERRLOG("QP#%d: bad state updated: %u, maybe hardware issue\n", rqpair->qpair.qid, new_state);
582 		/*
583 		 * IBV_QPS_UNKNOWN is undefined if the library version is older than libibverbs-1.1.8.
584 		 * It is the enum element right after IBV_QPS_ERR, hence the IBV_QPS_ERR + 1 return value.
585 		 */
586 		return IBV_QPS_ERR + 1;
587 	}
588 
589 	if (old_state != new_state)
590 	{
591 		spdk_trace_record(TRACE_RDMA_QP_STATE_CHANGE, 0, 0, (uintptr_t)rqpair, new_state);
592 	}
593 	return new_state;
594 }
595 
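/* Release the data WRs attached to a request. All WRs belonging to the request share the
 * same wr_id, so walk the chain until the id changes or the response WR is reached; only
 * the extra WRs are returned to the mempool, the first one is embedded in the request.
 */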
596 static void
597 nvmf_rdma_request_free_data(struct spdk_nvmf_rdma_request *rdma_req,
598 			    struct spdk_nvmf_rdma_transport *rtransport)
599 {
600 	struct spdk_nvmf_rdma_request_data	*data_wr;
601 	struct ibv_send_wr			*next_send_wr;
602 	uint64_t				req_wrid;
603 
604 	rdma_req->num_outstanding_data_wr = 0;
605 	data_wr = &rdma_req->data;
606 	req_wrid = data_wr->wr.wr_id;
607 	while (data_wr && data_wr->wr.wr_id == req_wrid) {
608 		memset(data_wr->sgl, 0, sizeof(data_wr->wr.sg_list[0]) * data_wr->wr.num_sge);
609 		data_wr->wr.num_sge = 0;
610 		next_send_wr = data_wr->wr.next;
611 		if (data_wr != &rdma_req->data) {
612 			data_wr->wr.next = NULL;
613 			spdk_mempool_put(rtransport->data_wr_pool, data_wr);
614 		}
615 		data_wr = (!next_send_wr || next_send_wr == &rdma_req->rsp.wr) ? NULL :
616 			  SPDK_CONTAINEROF(next_send_wr, struct spdk_nvmf_rdma_request_data, wr);
617 	}
618 	rdma_req->data.wr.next = NULL;
619 	rdma_req->rsp.wr.next = NULL;
620 }
621 
622 static void
623 nvmf_rdma_dump_request(struct spdk_nvmf_rdma_request *req)
624 {
625 	SPDK_ERRLOG("\t\tRequest Data From Pool: %d\n", req->req.data_from_pool);
626 	if (req->req.cmd) {
627 		SPDK_ERRLOG("\t\tRequest opcode: %d\n", req->req.cmd->nvmf_cmd.opcode);
628 	}
629 	if (req->recv) {
630 		SPDK_ERRLOG("\t\tRequest recv wr_id%lu\n", req->recv->wr.wr_id);
631 	}
632 }
633 
634 static void
635 nvmf_rdma_dump_qpair_contents(struct spdk_nvmf_rdma_qpair *rqpair)
636 {
637 	int i;
638 
639 	SPDK_ERRLOG("Dumping contents of queue pair (QID %d)\n", rqpair->qpair.qid);
640 	for (i = 0; i < rqpair->max_queue_depth; i++) {
641 		if (rqpair->resources->reqs[i].state != RDMA_REQUEST_STATE_FREE) {
642 			nvmf_rdma_dump_request(&rqpair->resources->reqs[i]);
643 		}
644 	}
645 }
646 
647 static void
648 nvmf_rdma_resources_destroy(struct spdk_nvmf_rdma_resources *resources)
649 {
650 	spdk_free(resources->cmds);
651 	spdk_free(resources->cpls);
652 	spdk_free(resources->bufs);
653 	spdk_free(resources->reqs);
654 	spdk_free(resources->recvs);
655 	free(resources);
656 }
657 
658 
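/* Allocate the receive/request arrays, command capsules, completion capsules, and optional
 * in-capsule data buffers for a queue (or shared receive queue), register them with the
 * memory map, and pre-post all receive WRs.
 */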
659 static struct spdk_nvmf_rdma_resources *
660 nvmf_rdma_resources_create(struct spdk_nvmf_rdma_resource_opts *opts)
661 {
662 	struct spdk_nvmf_rdma_resources		*resources;
663 	struct spdk_nvmf_rdma_request		*rdma_req;
664 	struct spdk_nvmf_rdma_recv		*rdma_recv;
665 	struct spdk_rdma_qp			*qp = NULL;
666 	struct spdk_rdma_srq			*srq = NULL;
667 	struct ibv_recv_wr			*bad_wr = NULL;
668 	struct spdk_rdma_memory_translation	translation;
669 	uint32_t				i;
670 	int					rc = 0;
671 
672 	resources = calloc(1, sizeof(struct spdk_nvmf_rdma_resources));
673 	if (!resources) {
674 		SPDK_ERRLOG("Unable to allocate resources for receive queue.\n");
675 		return NULL;
676 	}
677 
678 	resources->reqs = spdk_zmalloc(opts->max_queue_depth * sizeof(*resources->reqs),
679 				       0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
680 	resources->recvs = spdk_zmalloc(opts->max_queue_depth * sizeof(*resources->recvs),
681 					0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
682 	resources->cmds = spdk_zmalloc(opts->max_queue_depth * sizeof(*resources->cmds),
683 				       0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
684 	resources->cpls = spdk_zmalloc(opts->max_queue_depth * sizeof(*resources->cpls),
685 				       0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
686 
687 	if (opts->in_capsule_data_size > 0) {
688 		resources->bufs = spdk_zmalloc(opts->max_queue_depth * opts->in_capsule_data_size,
689 					       0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
690 					       SPDK_MALLOC_DMA);
691 	}
692 
693 	if (!resources->reqs || !resources->recvs || !resources->cmds ||
694 	    !resources->cpls || (opts->in_capsule_data_size && !resources->bufs)) {
695 		SPDK_ERRLOG("Unable to allocate sufficient memory for RDMA queue.\n");
696 		goto cleanup;
697 	}
698 
699 	SPDK_DEBUGLOG(rdma, "Command Array: %p Length: %lx\n",
700 		      resources->cmds, opts->max_queue_depth * sizeof(*resources->cmds));
701 	SPDK_DEBUGLOG(rdma, "Completion Array: %p Length: %lx\n",
702 		      resources->cpls, opts->max_queue_depth * sizeof(*resources->cpls));
703 	if (resources->bufs) {
704 		SPDK_DEBUGLOG(rdma, "In Capsule Data Array: %p Length: %x\n",
705 			      resources->bufs, opts->max_queue_depth *
706 			      opts->in_capsule_data_size);
707 	}
708 
709 	/* Initialize queues */
710 	STAILQ_INIT(&resources->incoming_queue);
711 	STAILQ_INIT(&resources->free_queue);
712 
713 	if (opts->shared) {
714 		srq = (struct spdk_rdma_srq *)opts->qp;
715 	} else {
716 		qp = (struct spdk_rdma_qp *)opts->qp;
717 	}
718 
719 	for (i = 0; i < opts->max_queue_depth; i++) {
720 		rdma_recv = &resources->recvs[i];
721 		rdma_recv->qpair = opts->qpair;
722 
723 		/* Set up memory to receive commands */
724 		if (resources->bufs) {
725 			rdma_recv->buf = (void *)((uintptr_t)resources->bufs + (i *
726 						  opts->in_capsule_data_size));
727 		}
728 
729 		rdma_recv->rdma_wr.type = RDMA_WR_TYPE_RECV;
730 
731 		rdma_recv->sgl[0].addr = (uintptr_t)&resources->cmds[i];
732 		rdma_recv->sgl[0].length = sizeof(resources->cmds[i]);
733 		rc = spdk_rdma_get_translation(opts->map, &resources->cmds[i], sizeof(resources->cmds[i]),
734 					       &translation);
735 		if (rc) {
736 			goto cleanup;
737 		}
738 		rdma_recv->sgl[0].lkey = spdk_rdma_memory_translation_get_lkey(&translation);
739 		rdma_recv->wr.num_sge = 1;
740 
741 		if (rdma_recv->buf) {
742 			rdma_recv->sgl[1].addr = (uintptr_t)rdma_recv->buf;
743 			rdma_recv->sgl[1].length = opts->in_capsule_data_size;
744 			rc = spdk_rdma_get_translation(opts->map, rdma_recv->buf, opts->in_capsule_data_size, &translation);
745 			if (rc) {
746 				goto cleanup;
747 			}
748 			rdma_recv->sgl[1].lkey = spdk_rdma_memory_translation_get_lkey(&translation);
749 			rdma_recv->wr.num_sge++;
750 		}
751 
752 		rdma_recv->wr.wr_id = (uintptr_t)&rdma_recv->rdma_wr;
753 		rdma_recv->wr.sg_list = rdma_recv->sgl;
754 		if (srq) {
755 			spdk_rdma_srq_queue_recv_wrs(srq, &rdma_recv->wr);
756 		} else {
757 			spdk_rdma_qp_queue_recv_wrs(qp, &rdma_recv->wr);
758 		}
759 	}
760 
761 	for (i = 0; i < opts->max_queue_depth; i++) {
762 		rdma_req = &resources->reqs[i];
763 
764 		if (opts->qpair != NULL) {
765 			rdma_req->req.qpair = &opts->qpair->qpair;
766 		} else {
767 			rdma_req->req.qpair = NULL;
768 		}
769 		rdma_req->req.cmd = NULL;
770 		rdma_req->req.iovcnt = 0;
771 		rdma_req->req.stripped_data = NULL;
772 
773 		/* Set up memory to send responses */
774 		rdma_req->req.rsp = &resources->cpls[i];
775 
776 		rdma_req->rsp.sgl[0].addr = (uintptr_t)&resources->cpls[i];
777 		rdma_req->rsp.sgl[0].length = sizeof(resources->cpls[i]);
778 		rc = spdk_rdma_get_translation(opts->map, &resources->cpls[i], sizeof(resources->cpls[i]),
779 					       &translation);
780 		if (rc) {
781 			goto cleanup;
782 		}
783 		rdma_req->rsp.sgl[0].lkey = spdk_rdma_memory_translation_get_lkey(&translation);
784 
785 		rdma_req->rsp.rdma_wr.type = RDMA_WR_TYPE_SEND;
786 		rdma_req->rsp.wr.wr_id = (uintptr_t)&rdma_req->rsp.rdma_wr;
787 		rdma_req->rsp.wr.next = NULL;
788 		rdma_req->rsp.wr.opcode = IBV_WR_SEND;
789 		rdma_req->rsp.wr.send_flags = IBV_SEND_SIGNALED;
790 		rdma_req->rsp.wr.sg_list = rdma_req->rsp.sgl;
791 		rdma_req->rsp.wr.num_sge = SPDK_COUNTOF(rdma_req->rsp.sgl);
792 
793 		/* Set up memory for data buffers */
794 		rdma_req->data.rdma_wr.type = RDMA_WR_TYPE_DATA;
795 		rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
796 		rdma_req->data.wr.next = NULL;
797 		rdma_req->data.wr.send_flags = IBV_SEND_SIGNALED;
798 		rdma_req->data.wr.sg_list = rdma_req->data.sgl;
799 		rdma_req->data.wr.num_sge = SPDK_COUNTOF(rdma_req->data.sgl);
800 
801 		/* Initialize request state to FREE */
802 		rdma_req->state = RDMA_REQUEST_STATE_FREE;
803 		STAILQ_INSERT_TAIL(&resources->free_queue, rdma_req, state_link);
804 	}
805 
806 	if (srq) {
807 		rc = spdk_rdma_srq_flush_recv_wrs(srq, &bad_wr);
808 	} else {
809 		rc = spdk_rdma_qp_flush_recv_wrs(qp, &bad_wr);
810 	}
811 
812 	if (rc) {
813 		goto cleanup;
814 	}
815 
816 	return resources;
817 
818 cleanup:
819 	nvmf_rdma_resources_destroy(resources);
820 	return NULL;
821 }
822 
823 static void
824 nvmf_rdma_qpair_clean_ibv_events(struct spdk_nvmf_rdma_qpair *rqpair)
825 {
826 	struct spdk_nvmf_rdma_ibv_event_ctx *ctx, *tctx;
827 	STAILQ_FOREACH_SAFE(ctx, &rqpair->ibv_events, link, tctx) {
828 		ctx->rqpair = NULL;
829 		/* Memory allocated for ctx is freed in nvmf_rdma_qpair_process_ibv_event */
830 		STAILQ_REMOVE(&rqpair->ibv_events, ctx, spdk_nvmf_rdma_ibv_event_ctx, link);
831 	}
832 }
833 
834 static void
835 nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rqpair)
836 {
837 	struct spdk_nvmf_rdma_recv	*rdma_recv, *recv_tmp;
838 	struct ibv_recv_wr		*bad_recv_wr = NULL;
839 	int				rc;
840 
841 	spdk_trace_record(TRACE_RDMA_QP_DESTROY, 0, 0, (uintptr_t)rqpair);
842 
843 	if (rqpair->qd != 0) {
844 		struct spdk_nvmf_qpair *qpair = &rqpair->qpair;
845 		struct spdk_nvmf_rdma_transport	*rtransport = SPDK_CONTAINEROF(qpair->transport,
846 				struct spdk_nvmf_rdma_transport, transport);
847 		struct spdk_nvmf_rdma_request *req;
848 		uint32_t i, max_req_count = 0;
849 
850 		SPDK_WARNLOG("Destroying qpair when queue depth is %d\n", rqpair->qd);
851 
852 		if (rqpair->srq == NULL) {
853 			nvmf_rdma_dump_qpair_contents(rqpair);
854 			max_req_count = rqpair->max_queue_depth;
855 		} else if (rqpair->poller && rqpair->resources) {
856 			max_req_count = rqpair->poller->max_srq_depth;
857 		}
858 
859 		SPDK_DEBUGLOG(rdma, "Release incomplete requests\n");
860 		for (i = 0; i < max_req_count; i++) {
861 			req = &rqpair->resources->reqs[i];
862 			if (req->req.qpair == qpair && req->state != RDMA_REQUEST_STATE_FREE) {
863 				/* nvmf_rdma_request_process checks qpair ibv and internal state
864 				 * and completes a request */
865 				nvmf_rdma_request_process(rtransport, req);
866 			}
867 		}
868 		assert(rqpair->qd == 0);
869 	}
870 
871 	if (rqpair->poller) {
872 		RB_REMOVE(qpairs_tree, &rqpair->poller->qpairs, rqpair);
873 
874 		if (rqpair->srq != NULL && rqpair->resources != NULL) {
875 			/* Drop all received but unprocessed commands for this queue and return them to SRQ */
876 			STAILQ_FOREACH_SAFE(rdma_recv, &rqpair->resources->incoming_queue, link, recv_tmp) {
877 				if (rqpair == rdma_recv->qpair) {
878 					STAILQ_REMOVE(&rqpair->resources->incoming_queue, rdma_recv, spdk_nvmf_rdma_recv, link);
879 					spdk_rdma_srq_queue_recv_wrs(rqpair->srq, &rdma_recv->wr);
880 					rc = spdk_rdma_srq_flush_recv_wrs(rqpair->srq, &bad_recv_wr);
881 					if (rc) {
882 						SPDK_ERRLOG("Unable to re-post rx descriptor\n");
883 					}
884 				}
885 			}
886 		}
887 	}
888 
889 	if (rqpair->cm_id) {
890 		if (rqpair->rdma_qp != NULL) {
891 			spdk_rdma_qp_destroy(rqpair->rdma_qp);
892 			rqpair->rdma_qp = NULL;
893 		}
894 		rdma_destroy_id(rqpair->cm_id);
895 
896 		if (rqpair->poller != NULL && rqpair->srq == NULL) {
897 			rqpair->poller->required_num_wr -= MAX_WR_PER_QP(rqpair->max_queue_depth);
898 		}
899 	}
900 
901 	if (rqpair->srq == NULL && rqpair->resources != NULL) {
902 		nvmf_rdma_resources_destroy(rqpair->resources);
903 	}
904 
905 	nvmf_rdma_qpair_clean_ibv_events(rqpair);
906 
907 	if (rqpair->destruct_channel) {
908 		spdk_put_io_channel(rqpair->destruct_channel);
909 		rqpair->destruct_channel = NULL;
910 	}
911 
912 	free(rqpair);
913 }
914 
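/* Grow the poller's completion queue so it can absorb the worst-case number of WRs for the
 * new qpair. iWARP devices cannot resize a CQ, so this fails when more room would be needed.
 */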
915 static int
916 nvmf_rdma_resize_cq(struct spdk_nvmf_rdma_qpair *rqpair, struct spdk_nvmf_rdma_device *device)
917 {
918 	struct spdk_nvmf_rdma_poller	*rpoller;
919 	int				rc, num_cqe, required_num_wr;
920 
921 	/* Enlarge CQ size dynamically */
922 	rpoller = rqpair->poller;
923 	required_num_wr = rpoller->required_num_wr + MAX_WR_PER_QP(rqpair->max_queue_depth);
924 	num_cqe = rpoller->num_cqe;
925 	if (num_cqe < required_num_wr) {
926 		num_cqe = spdk_max(num_cqe * 2, required_num_wr);
927 		num_cqe = spdk_min(num_cqe, device->attr.max_cqe);
928 	}
929 
930 	if (rpoller->num_cqe != num_cqe) {
931 		if (device->context->device->transport_type == IBV_TRANSPORT_IWARP) {
932 			SPDK_ERRLOG("iWARP doesn't support CQ resize. Current capacity %u, required %u\n"
933 				    "Using CQ of insufficient size may lead to CQ overrun\n", rpoller->num_cqe, num_cqe);
934 			return -1;
935 		}
936 		if (required_num_wr > device->attr.max_cqe) {
937 			SPDK_ERRLOG("RDMA CQE requirement (%d) exceeds device max_cqe limitation (%d)\n",
938 				    required_num_wr, device->attr.max_cqe);
939 			return -1;
940 		}
941 
942 		SPDK_DEBUGLOG(rdma, "Resize RDMA CQ from %d to %d\n", rpoller->num_cqe, num_cqe);
943 		rc = ibv_resize_cq(rpoller->cq, num_cqe);
944 		if (rc) {
945 			SPDK_ERRLOG("RDMA CQ resize failed: errno %d: %s\n", errno, spdk_strerror(errno));
946 			return -1;
947 		}
948 
949 		rpoller->num_cqe = num_cqe;
950 	}
951 
952 	rpoller->required_num_wr = required_num_wr;
953 	return 0;
954 }
955 
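/* Create the RDMA QP for an accepted connection; when no shared receive queue is used, also
 * grow the CQ and allocate per-qpair resources, otherwise reuse the poller's shared resources.
 */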
956 static int
957 nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
958 {
959 	struct spdk_nvmf_rdma_qpair		*rqpair;
960 	struct spdk_nvmf_rdma_transport		*rtransport;
961 	struct spdk_nvmf_transport		*transport;
962 	struct spdk_nvmf_rdma_resource_opts	opts;
963 	struct spdk_nvmf_rdma_device		*device;
964 	struct spdk_rdma_qp_init_attr		qp_init_attr = {};
965 
966 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
967 	device = rqpair->device;
968 
969 	qp_init_attr.qp_context	= rqpair;
970 	qp_init_attr.pd		= device->pd;
971 	qp_init_attr.send_cq	= rqpair->poller->cq;
972 	qp_init_attr.recv_cq	= rqpair->poller->cq;
973 
974 	if (rqpair->srq) {
975 		qp_init_attr.srq		= rqpair->srq->srq;
976 	} else {
977 		qp_init_attr.cap.max_recv_wr	= rqpair->max_queue_depth;
978 	}
979 
980 	/* SEND, READ, and WRITE operations */
981 	qp_init_attr.cap.max_send_wr	= (uint32_t)rqpair->max_queue_depth * 2;
982 	qp_init_attr.cap.max_send_sge	= spdk_min((uint32_t)device->attr.max_sge, NVMF_DEFAULT_TX_SGE);
983 	qp_init_attr.cap.max_recv_sge	= spdk_min((uint32_t)device->attr.max_sge, NVMF_DEFAULT_RX_SGE);
984 	qp_init_attr.stats		= &rqpair->poller->stat.qp_stats;
985 
986 	if (rqpair->srq == NULL && nvmf_rdma_resize_cq(rqpair, device) < 0) {
987 		SPDK_ERRLOG("Failed to resize the completion queue. Cannot initialize qpair.\n");
988 		goto error;
989 	}
990 
991 	rqpair->rdma_qp = spdk_rdma_qp_create(rqpair->cm_id, &qp_init_attr);
992 	if (!rqpair->rdma_qp) {
993 		goto error;
994 	}
995 
996 	rqpair->qp_num = rqpair->rdma_qp->qp->qp_num;
997 
998 	rqpair->max_send_depth = spdk_min((uint32_t)(rqpair->max_queue_depth * 2),
999 					  qp_init_attr.cap.max_send_wr);
1000 	rqpair->max_send_sge = spdk_min(NVMF_DEFAULT_TX_SGE, qp_init_attr.cap.max_send_sge);
1001 	rqpair->max_recv_sge = spdk_min(NVMF_DEFAULT_RX_SGE, qp_init_attr.cap.max_recv_sge);
1002 	spdk_trace_record(TRACE_RDMA_QP_CREATE, 0, 0, (uintptr_t)rqpair);
1003 	SPDK_DEBUGLOG(rdma, "New RDMA Connection: %p\n", qpair);
1004 
1005 	if (rqpair->poller->srq == NULL) {
1006 		rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport);
1007 		transport = &rtransport->transport;
1008 
1009 		opts.qp = rqpair->rdma_qp;
1010 		opts.map = device->map;
1011 		opts.qpair = rqpair;
1012 		opts.shared = false;
1013 		opts.max_queue_depth = rqpair->max_queue_depth;
1014 		opts.in_capsule_data_size = transport->opts.in_capsule_data_size;
1015 
1016 		rqpair->resources = nvmf_rdma_resources_create(&opts);
1017 
1018 		if (!rqpair->resources) {
1019 			SPDK_ERRLOG("Unable to allocate resources for receive queue.\n");
1020 			rdma_destroy_qp(rqpair->cm_id);
1021 			goto error;
1022 		}
1023 	} else {
1024 		rqpair->resources = rqpair->poller->resources;
1025 	}
1026 
1027 	rqpair->current_recv_depth = 0;
1028 	STAILQ_INIT(&rqpair->pending_rdma_read_queue);
1029 	STAILQ_INIT(&rqpair->pending_rdma_write_queue);
1030 
1031 	return 0;
1032 
1033 error:
1034 	rdma_destroy_id(rqpair->cm_id);
1035 	rqpair->cm_id = NULL;
1036 	return -1;
1037 }
1038 
1039 /* Queue the given recv wr on the qpair's RDMA QP or its shared receive queue for posting. */
1040 /* This function accepts either a single wr or the first wr in a linked list. */
1041 static void
1042 nvmf_rdma_qpair_queue_recv_wrs(struct spdk_nvmf_rdma_qpair *rqpair, struct ibv_recv_wr *first)
1043 {
1044 	struct spdk_nvmf_rdma_transport *rtransport = SPDK_CONTAINEROF(rqpair->qpair.transport,
1045 			struct spdk_nvmf_rdma_transport, transport);
1046 
1047 	if (rqpair->srq != NULL) {
1048 		spdk_rdma_srq_queue_recv_wrs(rqpair->srq, first);
1049 	} else {
1050 		if (spdk_rdma_qp_queue_recv_wrs(rqpair->rdma_qp, first)) {
1051 			STAILQ_INSERT_TAIL(&rqpair->poller->qpairs_pending_recv, rqpair, recv_link);
1052 		}
1053 	}
1054 
1055 	if (rtransport->rdma_opts.no_wr_batching) {
1056 		_poller_submit_recvs(rtransport, rqpair->poller);
1057 	}
1058 }
1059 
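/* Post the RDMA READ WRs that pull the command's data from host memory into the
 * controller's buffers (host-to-controller transfer).
 */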
1060 static int
1061 request_transfer_in(struct spdk_nvmf_request *req)
1062 {
1063 	struct spdk_nvmf_rdma_request	*rdma_req;
1064 	struct spdk_nvmf_qpair		*qpair;
1065 	struct spdk_nvmf_rdma_qpair	*rqpair;
1066 	struct spdk_nvmf_rdma_transport *rtransport;
1067 
1068 	qpair = req->qpair;
1069 	rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
1070 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
1071 	rtransport = SPDK_CONTAINEROF(rqpair->qpair.transport,
1072 				      struct spdk_nvmf_rdma_transport, transport);
1073 
1074 	assert(req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
1075 	assert(rdma_req != NULL);
1076 
1077 	if (spdk_rdma_qp_queue_send_wrs(rqpair->rdma_qp, &rdma_req->data.wr)) {
1078 		STAILQ_INSERT_TAIL(&rqpair->poller->qpairs_pending_send, rqpair, send_link);
1079 	}
1080 	if (rtransport->rdma_opts.no_wr_batching) {
1081 		_poller_submit_sends(rtransport, rqpair->poller);
1082 	}
1083 
1084 	rqpair->current_read_depth += rdma_req->num_outstanding_data_wr;
1085 	rqpair->current_send_depth += rdma_req->num_outstanding_data_wr;
1086 	return 0;
1087 }
1088 
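/* Send the completion for a request: re-post the recv buffer, then post an optional chain
 * of RDMA WRITEs (controller-to-host data) followed by the SEND carrying the NVMe completion.
 */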
1089 static int
1090 request_transfer_out(struct spdk_nvmf_request *req, int *data_posted)
1091 {
1092 	int				num_outstanding_data_wr = 0;
1093 	struct spdk_nvmf_rdma_request	*rdma_req;
1094 	struct spdk_nvmf_qpair		*qpair;
1095 	struct spdk_nvmf_rdma_qpair	*rqpair;
1096 	struct spdk_nvme_cpl		*rsp;
1097 	struct ibv_send_wr		*first = NULL;
1098 	struct spdk_nvmf_rdma_transport *rtransport;
1099 
1100 	*data_posted = 0;
1101 	qpair = req->qpair;
1102 	rsp = &req->rsp->nvme_cpl;
1103 	rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
1104 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
1105 	rtransport = SPDK_CONTAINEROF(rqpair->qpair.transport,
1106 				      struct spdk_nvmf_rdma_transport, transport);
1107 
1108 	/* Advance our sq_head pointer */
1109 	if (qpair->sq_head == qpair->sq_head_max) {
1110 		qpair->sq_head = 0;
1111 	} else {
1112 		qpair->sq_head++;
1113 	}
1114 	rsp->sqhd = qpair->sq_head;
1115 
1116 	/* Re-post the recv buffer used by this request's capsule */
1117 	assert(rdma_req->recv != NULL);
1118 
1119 	nvmf_rdma_qpair_queue_recv_wrs(rqpair, &rdma_req->recv->wr);
1120 
1121 	rdma_req->recv = NULL;
1122 	assert(rqpair->current_recv_depth > 0);
1123 	rqpair->current_recv_depth--;
1124 
1125 	/* Build the response which consists of optional
1126 	 * RDMA WRITEs to transfer data, plus an RDMA SEND
1127 	 * containing the response.
1128 	 */
1129 	first = &rdma_req->rsp.wr;
1130 
1131 	if (rsp->status.sc != SPDK_NVME_SC_SUCCESS) {
1132 		/* On failure, data was not read from the controller. So clear the
1133 		 * number of outstanding data WRs to zero.
1134 		 */
1135 		rdma_req->num_outstanding_data_wr = 0;
1136 	} else if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1137 		first = &rdma_req->data.wr;
1138 		*data_posted = 1;
1139 		num_outstanding_data_wr = rdma_req->num_outstanding_data_wr;
1140 	}
1141 	if (spdk_rdma_qp_queue_send_wrs(rqpair->rdma_qp, first)) {
1142 		STAILQ_INSERT_TAIL(&rqpair->poller->qpairs_pending_send, rqpair, send_link);
1143 	}
1144 	if (rtransport->rdma_opts.no_wr_batching) {
1145 		_poller_submit_sends(rtransport, rqpair->poller);
1146 	}
1147 
1148 	/* +1 for the rsp wr */
1149 	rqpair->current_send_depth += num_outstanding_data_wr + 1;
1150 
1151 	return 0;
1152 }
1153 
1154 static int
1155 nvmf_rdma_event_accept(struct rdma_cm_id *id, struct spdk_nvmf_rdma_qpair *rqpair)
1156 {
1157 	struct spdk_nvmf_rdma_accept_private_data	accept_data;
1158 	struct rdma_conn_param				ctrlr_event_data = {};
1159 	int						rc;
1160 
1161 	accept_data.recfmt = 0;
1162 	accept_data.crqsize = rqpair->max_queue_depth;
1163 
1164 	ctrlr_event_data.private_data = &accept_data;
1165 	ctrlr_event_data.private_data_len = sizeof(accept_data);
1166 	if (id->ps == RDMA_PS_TCP) {
1167 		ctrlr_event_data.responder_resources = 0; /* We accept 0 reads from the host */
1168 		ctrlr_event_data.initiator_depth = rqpair->max_read_depth;
1169 	}
1170 
1171 	/* Configure infinite retries for the initiator side qpair.
1172 	 * We need to pass this value to the initiator to prevent the
1173 	 * initiator side NIC from completing SEND requests back to the
1174 	 * initiator with status rnr_retry_count_exceeded. */
1175 	ctrlr_event_data.rnr_retry_count = 0x7;
1176 
1177 	/* When the qpair is created without the rdma cm API, additional
1178 	 * information must be provided to the initiator in the connection response:
1179 	 * whether the qpair uses an SRQ, and its qp_num.
1180 	 * The fields below are ignored by rdma cm if the qpair has been
1181 	 * created using the rdma cm API. */
1182 	ctrlr_event_data.srq = rqpair->srq ? 1 : 0;
1183 	ctrlr_event_data.qp_num = rqpair->qp_num;
1184 
1185 	rc = spdk_rdma_qp_accept(rqpair->rdma_qp, &ctrlr_event_data);
1186 	if (rc) {
1187 		SPDK_ERRLOG("Error %d on spdk_rdma_qp_accept\n", errno);
1188 	} else {
1189 		SPDK_DEBUGLOG(rdma, "Sent back the accept\n");
1190 	}
1191 
1192 	return rc;
1193 }
1194 
1195 static void
1196 nvmf_rdma_event_reject(struct rdma_cm_id *id, enum spdk_nvmf_rdma_transport_error error)
1197 {
1198 	struct spdk_nvmf_rdma_reject_private_data	rej_data;
1199 
1200 	rej_data.recfmt = 0;
1201 	rej_data.sts = error;
1202 
1203 	rdma_reject(id, &rej_data, sizeof(rej_data));
1204 }
1205 
1206 static int
1207 nvmf_rdma_connect(struct spdk_nvmf_transport *transport, struct rdma_cm_event *event)
1208 {
1209 	struct spdk_nvmf_rdma_transport *rtransport;
1210 	struct spdk_nvmf_rdma_qpair	*rqpair = NULL;
1211 	struct spdk_nvmf_rdma_port	*port;
1212 	struct rdma_conn_param		*rdma_param = NULL;
1213 	const struct spdk_nvmf_rdma_request_private_data *private_data = NULL;
1214 	uint16_t			max_queue_depth;
1215 	uint16_t			max_read_depth;
1216 
1217 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
1218 
1219 	assert(event->id != NULL); /* Impossible. Can't even reject the connection. */
1220 	assert(event->id->verbs != NULL); /* Impossible. No way to handle this. */
1221 
1222 	rdma_param = &event->param.conn;
1223 	if (rdma_param->private_data == NULL ||
1224 	    rdma_param->private_data_len < sizeof(struct spdk_nvmf_rdma_request_private_data)) {
1225 		SPDK_ERRLOG("connect request: no private data provided\n");
1226 		nvmf_rdma_event_reject(event->id, SPDK_NVMF_RDMA_ERROR_INVALID_PRIVATE_DATA_LENGTH);
1227 		return -1;
1228 	}
1229 
1230 	private_data = rdma_param->private_data;
1231 	if (private_data->recfmt != 0) {
1232 		SPDK_ERRLOG("Received RDMA private data with RECFMT != 0\n");
1233 		nvmf_rdma_event_reject(event->id, SPDK_NVMF_RDMA_ERROR_INVALID_RECFMT);
1234 		return -1;
1235 	}
1236 
1237 	SPDK_DEBUGLOG(rdma, "Connect Recv on fabric intf name %s, dev_name %s\n",
1238 		      event->id->verbs->device->name, event->id->verbs->device->dev_name);
1239 
1240 	port = event->listen_id->context;
1241 	SPDK_DEBUGLOG(rdma, "Listen Id was %p with verbs %p. ListenAddr: %p\n",
1242 		      event->listen_id, event->listen_id->verbs, port);
1243 
1244 	/* Figure out the supported queue depth. This is a multi-step process
1245 	 * that takes into account hardware maximums, host provided values,
1246 	 * and our target's internal memory limits */
1247 
1248 	SPDK_DEBUGLOG(rdma, "Calculating Queue Depth\n");
1249 
1250 	/* Start with the maximum queue depth allowed by the target */
1251 	max_queue_depth = rtransport->transport.opts.max_queue_depth;
1252 	max_read_depth = rtransport->transport.opts.max_queue_depth;
1253 	SPDK_DEBUGLOG(rdma, "Target Max Queue Depth: %d\n",
1254 		      rtransport->transport.opts.max_queue_depth);
1255 
1256 	/* Next check the local NIC's hardware limitations */
1257 	SPDK_DEBUGLOG(rdma,
1258 		      "Local NIC Max Send/Recv Queue Depth: %d Max Read/Write Queue Depth: %d\n",
1259 		      port->device->attr.max_qp_wr, port->device->attr.max_qp_rd_atom);
1260 	max_queue_depth = spdk_min(max_queue_depth, port->device->attr.max_qp_wr);
1261 	max_read_depth = spdk_min(max_read_depth, port->device->attr.max_qp_init_rd_atom);
1262 
1263 	/* Next check the remote NIC's hardware limitations */
1264 	SPDK_DEBUGLOG(rdma,
1265 		      "Host (Initiator) NIC Max Incoming RDMA R/W operations: %d Max Outgoing RDMA R/W operations: %d\n",
1266 		      rdma_param->initiator_depth, rdma_param->responder_resources);
1267 	if (rdma_param->initiator_depth > 0) {
1268 		max_read_depth = spdk_min(max_read_depth, rdma_param->initiator_depth);
1269 	}
1270 
1271 	/* Finally check for the host software requested values, which are
1272 	 * optional. */
1273 	if (rdma_param->private_data != NULL &&
1274 	    rdma_param->private_data_len >= sizeof(struct spdk_nvmf_rdma_request_private_data)) {
1275 		SPDK_DEBUGLOG(rdma, "Host Receive Queue Size: %d\n", private_data->hrqsize);
1276 		SPDK_DEBUGLOG(rdma, "Host Send Queue Size: %d\n", private_data->hsqsize);
1277 		max_queue_depth = spdk_min(max_queue_depth, private_data->hrqsize);
1278 		max_queue_depth = spdk_min(max_queue_depth, private_data->hsqsize + 1);
1279 	}
1280 
1281 	SPDK_DEBUGLOG(rdma, "Final Negotiated Queue Depth: %d R/W Depth: %d\n",
1282 		      max_queue_depth, max_read_depth);
1283 
1284 	rqpair = calloc(1, sizeof(struct spdk_nvmf_rdma_qpair));
1285 	if (rqpair == NULL) {
1286 		SPDK_ERRLOG("Could not allocate new connection.\n");
1287 		nvmf_rdma_event_reject(event->id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES);
1288 		return -1;
1289 	}
1290 
1291 	rqpair->device = port->device;
1292 	rqpair->max_queue_depth = max_queue_depth;
1293 	rqpair->max_read_depth = max_read_depth;
1294 	rqpair->cm_id = event->id;
1295 	rqpair->listen_id = event->listen_id;
1296 	rqpair->qpair.transport = transport;
1297 	STAILQ_INIT(&rqpair->ibv_events);
1298 	/* Use the qid from the private data to determine the qpair type;
1299 	   qid will be set to the appropriate value when the controller is created. */
1300 	rqpair->qpair.qid = private_data->qid;
1301 
1302 	event->id->context = &rqpair->qpair;
1303 
1304 	spdk_nvmf_tgt_new_qpair(transport->tgt, &rqpair->qpair);
1305 
1306 	return 0;
1307 }
1308 
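/* Set the data WR opcode based on transfer direction: controller-to-host data is pushed with
 * an unsignaled RDMA WRITE chained to the next WR, host-to-controller data is pulled with a
 * signaled RDMA READ that terminates the chain.
 */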
1309 static inline void
1310 nvmf_rdma_setup_wr(struct ibv_send_wr *wr, struct ibv_send_wr *next,
1311 		   enum spdk_nvme_data_transfer xfer)
1312 {
1313 	if (xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1314 		wr->opcode = IBV_WR_RDMA_WRITE;
1315 		wr->send_flags = 0;
1316 		wr->next = next;
1317 	} else if (xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
1318 		wr->opcode = IBV_WR_RDMA_READ;
1319 		wr->send_flags = IBV_SEND_SIGNALED;
1320 		wr->next = NULL;
1321 	} else {
1322 		assert(0);
1323 	}
1324 }
1325 
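/* Pull num_sgl_descriptors extra data WRs from the mempool and chain them behind the
 * request's embedded data WR; every WR in the chain reuses the same wr_id so that
 * nvmf_rdma_request_free_data can free them together.
 */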
1326 static int
1327 nvmf_request_alloc_wrs(struct spdk_nvmf_rdma_transport *rtransport,
1328 		       struct spdk_nvmf_rdma_request *rdma_req,
1329 		       uint32_t num_sgl_descriptors)
1330 {
1331 	struct spdk_nvmf_rdma_request_data	*work_requests[SPDK_NVMF_MAX_SGL_ENTRIES];
1332 	struct spdk_nvmf_rdma_request_data	*current_data_wr;
1333 	uint32_t				i;
1334 
1335 	if (num_sgl_descriptors > SPDK_NVMF_MAX_SGL_ENTRIES) {
1336 		SPDK_ERRLOG("Requested too much entries (%u), the limit is %u\n",
1337 			    num_sgl_descriptors, SPDK_NVMF_MAX_SGL_ENTRIES);
1338 		return -EINVAL;
1339 	}
1340 
1341 	if (spdk_mempool_get_bulk(rtransport->data_wr_pool, (void **)work_requests, num_sgl_descriptors)) {
1342 		return -ENOMEM;
1343 	}
1344 
1345 	current_data_wr = &rdma_req->data;
1346 
1347 	for (i = 0; i < num_sgl_descriptors; i++) {
1348 		nvmf_rdma_setup_wr(&current_data_wr->wr, &work_requests[i]->wr, rdma_req->req.xfer);
1349 		current_data_wr->wr.next = &work_requests[i]->wr;
1350 		current_data_wr = work_requests[i];
1351 		current_data_wr->wr.sg_list = current_data_wr->sgl;
1352 		current_data_wr->wr.wr_id = rdma_req->data.wr.wr_id;
1353 	}
1354 
1355 	nvmf_rdma_setup_wr(&current_data_wr->wr, &rdma_req->rsp.wr, rdma_req->req.xfer);
1356 
1357 	return 0;
1358 }
1359 
1360 static inline void
1361 nvmf_rdma_setup_request(struct spdk_nvmf_rdma_request *rdma_req)
1362 {
1363 	struct ibv_send_wr		*wr = &rdma_req->data.wr;
1364 	struct spdk_nvme_sgl_descriptor	*sgl = &rdma_req->req.cmd->nvme_cmd.dptr.sgl1;
1365 
1366 	wr->wr.rdma.rkey = sgl->keyed.key;
1367 	wr->wr.rdma.remote_addr = sgl->address;
1368 	nvmf_rdma_setup_wr(wr, &rdma_req->rsp.wr, rdma_req->req.xfer);
1369 }
1370 
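/* Advance the remote address of each chained data WR by the number of bytes covered by the
 * preceding WRs, so consecutive WRs target consecutive regions of the host buffer described
 * by the keyed SGL.
 */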
1371 static inline void
1372 nvmf_rdma_update_remote_addr(struct spdk_nvmf_rdma_request *rdma_req, uint32_t num_wrs)
1373 {
1374 	struct ibv_send_wr		*wr = &rdma_req->data.wr;
1375 	struct spdk_nvme_sgl_descriptor	*sgl = &rdma_req->req.cmd->nvme_cmd.dptr.sgl1;
1376 	uint32_t			i;
1377 	int				j;
1378 	uint64_t			remote_addr_offset = 0;
1379 
1380 	for (i = 0; i < num_wrs; ++i) {
1381 		wr->wr.rdma.rkey = sgl->keyed.key;
1382 		wr->wr.rdma.remote_addr = sgl->address + remote_addr_offset;
1383 		for (j = 0; j < wr->num_sge; ++j) {
1384 			remote_addr_offset += wr->sg_list[j].length;
1385 		}
1386 		wr = wr->next;
1387 	}
1388 }
1389 
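/* Translate the request's iovecs into SGEs on the given send WR, looking up the lkey of each
 * buffer in the device's memory map.
 */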
1390 static int
1391 nvmf_rdma_fill_wr_sgl(struct spdk_nvmf_rdma_poll_group *rgroup,
1392 		      struct spdk_nvmf_rdma_device *device,
1393 		      struct spdk_nvmf_rdma_request *rdma_req,
1394 		      struct ibv_send_wr *wr,
1395 		      uint32_t total_length)
1396 {
1397 	struct spdk_rdma_memory_translation mem_translation;
1398 	struct ibv_sge	*sg_ele;
1399 	struct iovec *iov;
1400 	uint32_t lkey, remaining;
1401 	int rc;
1402 
1403 	wr->num_sge = 0;
1404 
1405 	while (total_length && wr->num_sge < SPDK_NVMF_MAX_SGL_ENTRIES) {
1406 		iov = &rdma_req->req.iov[rdma_req->iovpos];
1407 		rc = spdk_rdma_get_translation(device->map, iov->iov_base, iov->iov_len, &mem_translation);
1408 		if (spdk_unlikely(rc)) {
1409 			return rc;
1410 		}
1411 
1412 		lkey = spdk_rdma_memory_translation_get_lkey(&mem_translation);
1413 		sg_ele = &wr->sg_list[wr->num_sge];
1414 		remaining = spdk_min((uint32_t)iov->iov_len - rdma_req->offset, total_length);
1415 
1416 		sg_ele->lkey = lkey;
1417 		sg_ele->addr = (uintptr_t)iov->iov_base + rdma_req->offset;
1418 		sg_ele->length = remaining;
1419 		SPDK_DEBUGLOG(rdma, "sge[%d] %p addr 0x%"PRIx64", len %u\n", wr->num_sge, sg_ele, sg_ele->addr,
1420 			      sg_ele->length);
1421 		rdma_req->offset += sg_ele->length;
1422 		total_length -= sg_ele->length;
1423 		wr->num_sge++;
1424 
1425 		if (rdma_req->offset == iov->iov_len) {
1426 			rdma_req->offset = 0;
1427 			rdma_req->iovpos++;
1428 		}
1429 	}
1430 
1431 	if (total_length) {
1432 		SPDK_ERRLOG("Not enough SG entries to hold data buffer\n");
1433 		return -EINVAL;
1434 	}
1435 
1436 	return 0;
1437 }
1438 
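/* Build the SGL for a WR (and any chained extra WRs) when DIF is enabled: each SGE is capped
 * at the data block size so the interleaved metadata can be skipped between blocks.
 */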
1439 static int
1440 nvmf_rdma_fill_wr_sgl_with_dif(struct spdk_nvmf_rdma_poll_group *rgroup,
1441 			       struct spdk_nvmf_rdma_device *device,
1442 			       struct spdk_nvmf_rdma_request *rdma_req,
1443 			       struct ibv_send_wr *wr,
1444 			       uint32_t total_length,
1445 			       uint32_t num_extra_wrs)
1446 {
1447 	struct spdk_rdma_memory_translation mem_translation;
1448 	struct spdk_dif_ctx *dif_ctx = &rdma_req->req.dif.dif_ctx;
1449 	struct ibv_sge *sg_ele;
1450 	struct iovec *iov;
1451 	struct iovec *rdma_iov;
1452 	uint32_t lkey, remaining;
1453 	uint32_t remaining_data_block, data_block_size, md_size;
1454 	uint32_t sge_len;
1455 	int rc;
1456 
1457 	data_block_size = dif_ctx->block_size - dif_ctx->md_size;
1458 
1459 	if (spdk_likely(!rdma_req->req.stripped_data)) {
1460 		rdma_iov = rdma_req->req.iov;
1461 		remaining_data_block = data_block_size;
1462 		md_size = dif_ctx->md_size;
1463 	} else {
1464 		rdma_iov = rdma_req->req.stripped_data->iov;
1465 		total_length = total_length / dif_ctx->block_size * data_block_size;
1466 		remaining_data_block = total_length;
1467 		md_size = 0;
1468 	}
1469 
1470 	wr->num_sge = 0;
1471 
1472 	while (total_length && (num_extra_wrs || wr->num_sge < SPDK_NVMF_MAX_SGL_ENTRIES)) {
1473 		iov = rdma_iov + rdma_req->iovpos;
1474 		rc = spdk_rdma_get_translation(device->map, iov->iov_base, iov->iov_len, &mem_translation);
1475 		if (spdk_unlikely(rc)) {
1476 			return rc;
1477 		}
1478 
1479 		lkey = spdk_rdma_memory_translation_get_lkey(&mem_translation);
1480 		sg_ele = &wr->sg_list[wr->num_sge];
1481 		remaining = spdk_min((uint32_t)iov->iov_len - rdma_req->offset, total_length);
1482 
1483 		while (remaining) {
1484 			if (wr->num_sge >= SPDK_NVMF_MAX_SGL_ENTRIES) {
1485 				if (num_extra_wrs > 0 && wr->next) {
1486 					wr = wr->next;
1487 					wr->num_sge = 0;
1488 					sg_ele = &wr->sg_list[wr->num_sge];
1489 					num_extra_wrs--;
1490 				} else {
1491 					break;
1492 				}
1493 			}
1494 			sg_ele->lkey = lkey;
1495 			sg_ele->addr = (uintptr_t)((char *)iov->iov_base + rdma_req->offset);
1496 			sge_len = spdk_min(remaining, remaining_data_block);
1497 			sg_ele->length = sge_len;
1498 			SPDK_DEBUGLOG(rdma, "sge[%d] %p addr 0x%"PRIx64", len %u\n", wr->num_sge, sg_ele,
1499 				      sg_ele->addr, sg_ele->length);
1500 			remaining -= sge_len;
1501 			remaining_data_block -= sge_len;
1502 			rdma_req->offset += sge_len;
1503 			total_length -= sge_len;
1504 
1505 			sg_ele++;
1506 			wr->num_sge++;
1507 
1508 			if (remaining_data_block == 0) {
1509 				/* skip metadata */
1510 				rdma_req->offset += md_size;
1511 				total_length -= md_size;
1512 				/* Metadata that does not fit in this IO buffer will be included in the next IO buffer */
1513 				remaining -= spdk_min(remaining, md_size);
1514 				remaining_data_block = data_block_size;
1515 			}
1516 
1517 			if (remaining == 0) {
1518 				/* By subtracting the size of the last IOV from the offset, we ensure that we skip
1519 				   the remaining metadata bytes at the beginning of the next buffer */
1520 				rdma_req->offset -= spdk_min(iov->iov_len, rdma_req->offset);
1521 				rdma_req->iovpos++;
1522 			}
1523 		}
1524 	}
1525 
1526 	if (total_length) {
1527 		SPDK_ERRLOG("Not enough SG entries to hold data buffer\n");
1528 		return -EINVAL;
1529 	}
1530 
1531 	return 0;
1532 }
1533 
1534 static inline uint32_t
1535 nvmf_rdma_calc_num_wrs(uint32_t length, uint32_t io_unit_size, uint32_t block_size)
1536 {
1537 	/* estimate the number of SG entries and WRs needed to process the request */
1538 	uint32_t num_sge = 0;
1539 	uint32_t i;
1540 	uint32_t num_buffers = SPDK_CEIL_DIV(length, io_unit_size);
1541 
1542 	for (i = 0; i < num_buffers && length > 0; i++) {
1543 		uint32_t buffer_len = spdk_min(length, io_unit_size);
1544 		uint32_t num_sge_in_block = SPDK_CEIL_DIV(buffer_len, block_size);
1545 
1546 		if (num_sge_in_block * block_size > buffer_len) {
1547 			++num_sge_in_block;
1548 		}
1549 		num_sge += num_sge_in_block;
1550 		length -= buffer_len;
1551 	}
1552 	return SPDK_CEIL_DIV(num_sge, SPDK_NVMF_MAX_SGL_ENTRIES);
1553 }
1554 
1555 static int
1556 nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
1557 			    struct spdk_nvmf_rdma_device *device,
1558 			    struct spdk_nvmf_rdma_request *rdma_req)
1559 {
1560 	struct spdk_nvmf_rdma_qpair		*rqpair;
1561 	struct spdk_nvmf_rdma_poll_group	*rgroup;
1562 	struct spdk_nvmf_request		*req = &rdma_req->req;
1563 	struct ibv_send_wr			*wr = &rdma_req->data.wr;
1564 	int					rc;
1565 	uint32_t				num_wrs = 1;
1566 	uint32_t				length;
1567 
1568 	rqpair = SPDK_CONTAINEROF(req->qpair, struct spdk_nvmf_rdma_qpair, qpair);
1569 	rgroup = rqpair->poller->group;
1570 
1571 	/* rdma wr specifics */
1572 	nvmf_rdma_setup_request(rdma_req);
1573 
1574 	length = req->length;
1575 	if (spdk_unlikely(req->dif_enabled)) {
1576 		req->dif.orig_length = length;
1577 		length = spdk_dif_get_length_with_md(length, &req->dif.dif_ctx);
1578 		req->dif.elba_length = length;
1579 	}
1580 
1581 	rc = spdk_nvmf_request_get_buffers(req, &rgroup->group, &rtransport->transport,
1582 					   length);
1583 	if (rc != 0) {
1584 		return rc;
1585 	}
1586 
1587 	assert(req->iovcnt <= rqpair->max_send_sge);
1588 
1589 	/* When dif_insert_or_strip is true and the I/O data length is greater than one block,
1590 	 * stripped buffers are obtained for DIF stripping. */
1591 	if (spdk_unlikely(req->dif_enabled && (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST)
1592 			  && (req->dif.elba_length > req->dif.dif_ctx.block_size))) {
1593 		rc = nvmf_request_get_stripped_buffers(req, &rgroup->group,
1594 						       &rtransport->transport, req->dif.orig_length);
1595 		if (rc != 0) {
1596 			SPDK_INFOLOG(rdma, "Get stripped buffers fail %d, fallback to req.iov.\n", rc);
1597 		}
1598 	}
1599 
1600 	rdma_req->iovpos = 0;
1601 
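	/* With DIF enabled, the payload plus interleaved metadata can require more SGEs than a
	 * single work request may carry, so extra WRs are allocated up front and chained behind
	 * the first one; nvmf_rdma_update_remote_addr() then advances the remote address for each
	 * chained WR.
	 */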
1602 	if (spdk_unlikely(req->dif_enabled)) {
1603 		num_wrs = nvmf_rdma_calc_num_wrs(length, rtransport->transport.opts.io_unit_size,
1604 						 req->dif.dif_ctx.block_size);
1605 		if (num_wrs > 1) {
1606 			rc = nvmf_request_alloc_wrs(rtransport, rdma_req, num_wrs - 1);
1607 			if (rc != 0) {
1608 				goto err_exit;
1609 			}
1610 		}
1611 
1612 		rc = nvmf_rdma_fill_wr_sgl_with_dif(rgroup, device, rdma_req, wr, length, num_wrs - 1);
1613 		if (spdk_unlikely(rc != 0)) {
1614 			goto err_exit;
1615 		}
1616 
1617 		if (num_wrs > 1) {
1618 			nvmf_rdma_update_remote_addr(rdma_req, num_wrs);
1619 		}
1620 	} else {
1621 		rc = nvmf_rdma_fill_wr_sgl(rgroup, device, rdma_req, wr, length);
1622 		if (spdk_unlikely(rc != 0)) {
1623 			goto err_exit;
1624 		}
1625 	}
1626 
1627 	/* set the number of outstanding data WRs for this request. */
1628 	rdma_req->num_outstanding_data_wr = num_wrs;
1629 
1630 	return rc;
1631 
1632 err_exit:
1633 	spdk_nvmf_request_free_buffers(req, &rgroup->group, &rtransport->transport);
1634 	nvmf_rdma_request_free_data(rdma_req, rtransport);
1635 	req->iovcnt = 0;
1636 	return rc;
1637 }
1638 
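/* For multi-SGL requests the command's SGL1 is a "last segment" descriptor whose offset points at
 * an array of keyed data block descriptors carried in the capsule. Each descriptor below gets its
 * own RDMA work request (the first one reuses the embedded data WR), with the remote address and
 * rkey taken from that descriptor.
 */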
1639 static int
1640 nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transport *rtransport,
1641 				      struct spdk_nvmf_rdma_device *device,
1642 				      struct spdk_nvmf_rdma_request *rdma_req)
1643 {
1644 	struct spdk_nvmf_rdma_qpair		*rqpair;
1645 	struct spdk_nvmf_rdma_poll_group	*rgroup;
1646 	struct ibv_send_wr			*current_wr;
1647 	struct spdk_nvmf_request		*req = &rdma_req->req;
1648 	struct spdk_nvme_sgl_descriptor		*inline_segment, *desc;
1649 	uint32_t				num_sgl_descriptors;
1650 	uint32_t				lengths[SPDK_NVMF_MAX_SGL_ENTRIES], total_length = 0;
1651 	uint32_t				i;
1652 	int					rc;
1653 
1654 	rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
1655 	rgroup = rqpair->poller->group;
1656 
1657 	inline_segment = &req->cmd->nvme_cmd.dptr.sgl1;
1658 	assert(inline_segment->generic.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
1659 	assert(inline_segment->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
1660 
1661 	num_sgl_descriptors = inline_segment->unkeyed.length / sizeof(struct spdk_nvme_sgl_descriptor);
1662 	assert(num_sgl_descriptors <= SPDK_NVMF_MAX_SGL_ENTRIES);
1663 
1664 	desc = (struct spdk_nvme_sgl_descriptor *)rdma_req->recv->buf + inline_segment->address;
1665 	for (i = 0; i < num_sgl_descriptors; i++) {
1666 		if (spdk_likely(!req->dif_enabled)) {
1667 			lengths[i] = desc->keyed.length;
1668 		} else {
1669 			req->dif.orig_length += desc->keyed.length;
1670 			lengths[i] = spdk_dif_get_length_with_md(desc->keyed.length, &req->dif.dif_ctx);
1671 			req->dif.elba_length += lengths[i];
1672 		}
1673 		total_length += lengths[i];
1674 		desc++;
1675 	}
1676 
1677 	if (total_length > rtransport->transport.opts.max_io_size) {
1678 		SPDK_ERRLOG("Multi SGL length 0x%x exceeds max io size 0x%x\n",
1679 			    total_length, rtransport->transport.opts.max_io_size);
1680 		req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
1681 		return -EINVAL;
1682 	}
1683 
1684 	if (nvmf_request_alloc_wrs(rtransport, rdma_req, num_sgl_descriptors - 1) != 0) {
1685 		return -ENOMEM;
1686 	}
1687 
1688 	rc = spdk_nvmf_request_get_buffers(req, &rgroup->group, &rtransport->transport, total_length);
1689 	if (rc != 0) {
1690 		nvmf_rdma_request_free_data(rdma_req, rtransport);
1691 		return rc;
1692 	}
1693 
1694 	/* When dif_insert_or_strip is true and the I/O data length is greater than one block,
1695 	 * stripped buffers are obtained for DIF stripping. */
1696 	if (spdk_unlikely(req->dif_enabled && (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST)
1697 			  && (req->dif.elba_length > req->dif.dif_ctx.block_size))) {
1698 		rc = nvmf_request_get_stripped_buffers(req, &rgroup->group,
1699 						       &rtransport->transport, req->dif.orig_length);
1700 		if (rc != 0) {
1701 			SPDK_INFOLOG(rdma, "Failed to get stripped buffers (%d), falling back to req.iov.\n", rc);
1702 		}
1703 	}
1704 
1705 	/* The first WR must always be the embedded data WR. This is how we unwind them later. */
1706 	current_wr = &rdma_req->data.wr;
1707 	assert(current_wr != NULL);
1708 
1709 	req->length = 0;
1710 	rdma_req->iovpos = 0;
1711 	desc = (struct spdk_nvme_sgl_descriptor *)rdma_req->recv->buf + inline_segment->address;
1712 	for (i = 0; i < num_sgl_descriptors; i++) {
1713 		/* The descriptors must be keyed data block descriptors with an address, not an offset. */
1714 		if (spdk_unlikely(desc->generic.type != SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK ||
1715 				  desc->keyed.subtype != SPDK_NVME_SGL_SUBTYPE_ADDRESS)) {
1716 			rc = -EINVAL;
1717 			goto err_exit;
1718 		}
1719 
1720 		if (spdk_likely(!req->dif_enabled)) {
1721 			rc = nvmf_rdma_fill_wr_sgl(rgroup, device, rdma_req, current_wr, lengths[i]);
1722 		} else {
1723 			rc = nvmf_rdma_fill_wr_sgl_with_dif(rgroup, device, rdma_req, current_wr,
1724 							    lengths[i], 0);
1725 		}
1726 		if (rc != 0) {
1727 			rc = -ENOMEM;
1728 			goto err_exit;
1729 		}
1730 
1731 		req->length += desc->keyed.length;
1732 		current_wr->wr.rdma.rkey = desc->keyed.key;
1733 		current_wr->wr.rdma.remote_addr = desc->address;
1734 		current_wr = current_wr->next;
1735 		desc++;
1736 	}
1737 
1738 #ifdef SPDK_CONFIG_RDMA_SEND_WITH_INVAL
1739 	/* Go back to the last descriptor in the list. */
1740 	desc--;
1741 	if ((device->attr.device_cap_flags & IBV_DEVICE_MEM_MGT_EXTENSIONS) != 0) {
1742 		if (desc->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY) {
1743 			rdma_req->rsp.wr.opcode = IBV_WR_SEND_WITH_INV;
1744 			rdma_req->rsp.wr.imm_data = desc->keyed.key;
1745 		}
1746 	}
1747 #endif
1748 
1749 	rdma_req->num_outstanding_data_wr = num_sgl_descriptors;
1750 
1751 	return 0;
1752 
1753 err_exit:
1754 	spdk_nvmf_request_free_buffers(req, &rgroup->group, &rtransport->transport);
1755 	nvmf_rdma_request_free_data(rdma_req, rtransport);
1756 	return rc;
1757 }
1758 
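/* Dispatch on the capsule's SGL1 descriptor. Three shapes are handled below:
 *  - keyed data block (address or invalidate-key subtype): payload lives in host memory and is
 *    moved with RDMA READ/WRITE;
 *  - data block with offset subtype: payload is in-capsule, directly behind the command;
 *  - last segment with offset subtype: an in-capsule list of keyed descriptors (multi-SGL).
 */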
1759 static int
1760 nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport,
1761 			    struct spdk_nvmf_rdma_device *device,
1762 			    struct spdk_nvmf_rdma_request *rdma_req)
1763 {
1764 	struct spdk_nvmf_request		*req = &rdma_req->req;
1765 	struct spdk_nvme_cpl			*rsp;
1766 	struct spdk_nvme_sgl_descriptor		*sgl;
1767 	int					rc;
1768 	uint32_t				length;
1769 
1770 	rsp = &req->rsp->nvme_cpl;
1771 	sgl = &req->cmd->nvme_cmd.dptr.sgl1;
1772 
1773 	if (sgl->generic.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK &&
1774 	    (sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS ||
1775 	     sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY)) {
1776 
1777 		length = sgl->keyed.length;
1778 		if (length > rtransport->transport.opts.max_io_size) {
1779 			SPDK_ERRLOG("SGL length 0x%x exceeds max io size 0x%x\n",
1780 				    length, rtransport->transport.opts.max_io_size);
1781 			rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
1782 			return -1;
1783 		}
1784 #ifdef SPDK_CONFIG_RDMA_SEND_WITH_INVAL
1785 		if ((device->attr.device_cap_flags & IBV_DEVICE_MEM_MGT_EXTENSIONS) != 0) {
1786 			if (sgl->keyed.subtype == SPDK_NVME_SGL_SUBTYPE_INVALIDATE_KEY) {
1787 				rdma_req->rsp.wr.opcode = IBV_WR_SEND_WITH_INV;
1788 				rdma_req->rsp.wr.imm_data = sgl->keyed.key;
1789 			}
1790 		}
1791 #endif
1792 
1793 		/* fill request length and populate iovs */
1794 		req->length = length;
1795 
1796 		rc = nvmf_rdma_request_fill_iovs(rtransport, device, rdma_req);
1797 		if (spdk_unlikely(rc < 0)) {
1798 			if (rc == -EINVAL) {
1799 				SPDK_ERRLOG("SGL length exceeds the max I/O size\n");
1800 				rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
1801 				return -1;
1802 			}
1803 			/* No available buffers. Queue this request up. */
1804 			SPDK_DEBUGLOG(rdma, "No available large data buffers. Queueing request %p\n", rdma_req);
1805 			return 0;
1806 		}
1807 
1808 		/* backward compatible */
1809 		req->data = req->iov[0].iov_base;
1810 
1811 		SPDK_DEBUGLOG(rdma, "Request %p took %d buffer/s from central pool\n", rdma_req,
1812 			      req->iovcnt);
1813 
1814 		return 0;
1815 	} else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK &&
1816 		   sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET) {
1817 		uint64_t offset = sgl->address;
1818 		uint32_t max_len = rtransport->transport.opts.in_capsule_data_size;
1819 
1820 		SPDK_DEBUGLOG(nvmf, "In-capsule data: offset 0x%" PRIx64 ", length 0x%x\n",
1821 			      offset, sgl->unkeyed.length);
1822 
1823 		if (offset > max_len) {
1824 			SPDK_ERRLOG("In-capsule offset 0x%" PRIx64 " exceeds capsule length 0x%x\n",
1825 				    offset, max_len);
1826 			rsp->status.sc = SPDK_NVME_SC_INVALID_SGL_OFFSET;
1827 			return -1;
1828 		}
1829 		max_len -= (uint32_t)offset;
1830 
1831 		if (sgl->unkeyed.length > max_len) {
1832 			SPDK_ERRLOG("In-capsule data length 0x%x exceeds capsule length 0x%x\n",
1833 				    sgl->unkeyed.length, max_len);
1834 			rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
1835 			return -1;
1836 		}
1837 
1838 		rdma_req->num_outstanding_data_wr = 0;
1839 		req->data = rdma_req->recv->buf + offset;
1840 		req->data_from_pool = false;
1841 		req->length = sgl->unkeyed.length;
1842 
1843 		req->iov[0].iov_base = req->data;
1844 		req->iov[0].iov_len = req->length;
1845 		req->iovcnt = 1;
1846 
1847 		return 0;
1848 	} else if (sgl->generic.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT &&
1849 		   sgl->unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET) {
1850 
1851 		rc = nvmf_rdma_request_fill_iovs_multi_sgl(rtransport, device, rdma_req);
1852 		if (rc == -ENOMEM) {
1853 			SPDK_DEBUGLOG(rdma, "No available large data buffers. Queueing request %p\n", rdma_req);
1854 			return 0;
1855 		} else if (rc == -EINVAL) {
1856 			SPDK_ERRLOG("Multi SGL element request length exceeds the max I/O size\n");
1857 			rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
1858 			return -1;
1859 		}
1860 
1861 		/* backward compatible */
1862 		req->data = req->iov[0].iov_base;
1863 
1864 		SPDK_DEBUGLOG(rdma, "Request %p took %d buffer/s from central pool\n", rdma_req,
1865 			      req->iovcnt);
1866 
1867 		return 0;
1868 	}
1869 
1870 	SPDK_ERRLOG("Invalid NVMf I/O Command SGL: Type 0x%x, Subtype 0x%x\n",
1871 		    sgl->generic.type, sgl->generic.subtype);
1872 	rsp->status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID;
1873 	return -1;
1874 }
1875 
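/* Return a request to the free queue: release pool and stripped buffers, free any extra data WRs,
 * clear per-request DIF/fused state and decrement the qpair's queue depth.
 */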
1876 static void
1877 _nvmf_rdma_request_free(struct spdk_nvmf_rdma_request *rdma_req,
1878 			struct spdk_nvmf_rdma_transport	*rtransport)
1879 {
1880 	struct spdk_nvmf_rdma_qpair		*rqpair;
1881 	struct spdk_nvmf_rdma_poll_group	*rgroup;
1882 
1883 	rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
1884 	if (rdma_req->req.data_from_pool) {
1885 		rgroup = rqpair->poller->group;
1886 
1887 		spdk_nvmf_request_free_buffers(&rdma_req->req, &rgroup->group, &rtransport->transport);
1888 	}
1889 	if (rdma_req->req.stripped_data) {
1890 		nvmf_request_free_stripped_buffers(&rdma_req->req,
1891 						   &rqpair->poller->group->group,
1892 						   &rtransport->transport);
1893 	}
1894 	nvmf_rdma_request_free_data(rdma_req, rtransport);
1895 	rdma_req->req.length = 0;
1896 	rdma_req->req.iovcnt = 0;
1897 	rdma_req->req.data = NULL;
1898 	rdma_req->offset = 0;
1899 	rdma_req->req.dif_enabled = false;
1900 	rdma_req->fused_failed = false;
1901 	if (rdma_req->fused_pair) {
1902 		/* This req was part of a valid fused pair, but failed before it got to
1903 		 * READY_TO_EXECUTE state.  This means we need to fail the other request
1904 		 * in the pair, because it is no longer part of a valid pair.  If the pair
1905 		 * already reached READY_TO_EXECUTE state, we need to kick it.
1906 		 */
1907 		rdma_req->fused_pair->fused_failed = true;
1908 		if (rdma_req->fused_pair->state == RDMA_REQUEST_STATE_READY_TO_EXECUTE) {
1909 			nvmf_rdma_request_process(rtransport, rdma_req->fused_pair);
1910 		}
1911 		rdma_req->fused_pair = NULL;
1912 	}
1913 	memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));
1914 	rqpair->qd--;
1915 
1916 	STAILQ_INSERT_HEAD(&rqpair->resources->free_queue, rdma_req, state_link);
1917 	rdma_req->state = RDMA_REQUEST_STATE_FREE;
1918 }
1919 
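/* Track fused command ordering on the qpair. A FUSE_FIRST must be immediately followed by a
 * FUSE_SECOND; when that happens the two requests are linked through fused_pair so they can be
 * executed back to back. Any other sequence marks the offending request(s) as fused_failed.
 */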
1920 static void
1921 nvmf_rdma_check_fused_ordering(struct spdk_nvmf_rdma_transport *rtransport,
1922 			       struct spdk_nvmf_rdma_qpair *rqpair,
1923 			       struct spdk_nvmf_rdma_request *rdma_req)
1924 {
1925 	enum spdk_nvme_cmd_fuse last, next;
1926 
1927 	last = rqpair->fused_first ? rqpair->fused_first->req.cmd->nvme_cmd.fuse : SPDK_NVME_CMD_FUSE_NONE;
1928 	next = rdma_req->req.cmd->nvme_cmd.fuse;
1929 
1930 	assert(last != SPDK_NVME_CMD_FUSE_SECOND);
1931 
1932 	if (spdk_likely(last == SPDK_NVME_CMD_FUSE_NONE && next == SPDK_NVME_CMD_FUSE_NONE)) {
1933 		return;
1934 	}
1935 
1936 	if (last == SPDK_NVME_CMD_FUSE_FIRST) {
1937 		if (next == SPDK_NVME_CMD_FUSE_SECOND) {
1938 			/* This is a valid pair of fused commands.  Point them at each other
1939 			 * so they can be submitted consecutively once ready to be executed.
1940 			 */
1941 			rqpair->fused_first->fused_pair = rdma_req;
1942 			rdma_req->fused_pair = rqpair->fused_first;
1943 			rqpair->fused_first = NULL;
1944 			return;
1945 		} else {
1946 			/* Mark the last req as failed since it wasn't followed by a SECOND. */
1947 			rqpair->fused_first->fused_failed = true;
1948 
1949 			/* If the last req is in READY_TO_EXECUTE state, then call
1950 			 * nvmf_rdma_request_process(), otherwise nothing else will kick it.
1951 			 */
1952 			if (rqpair->fused_first->state == RDMA_REQUEST_STATE_READY_TO_EXECUTE) {
1953 				nvmf_rdma_request_process(rtransport, rqpair->fused_first);
1954 			}
1955 
1956 			rqpair->fused_first = NULL;
1957 		}
1958 	}
1959 
1960 	if (next == SPDK_NVME_CMD_FUSE_FIRST) {
1961 		/* Set rqpair->fused_first here so that we know to check that the next request
1962 		 * is a SECOND (and to fail this one if it isn't).
1963 		 */
1964 		rqpair->fused_first = rdma_req;
1965 	} else if (next == SPDK_NVME_CMD_FUSE_SECOND) {
1966 		/* Mark this req as failed since it is a SECOND and the last one was not a FIRST. */
1967 		rdma_req->fused_failed = true;
1968 	}
1969 }
1970 
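/* Drive a request through its state machine. The loop below keeps applying transitions until the
 * state stops changing; the return value tells the caller whether any progress was made, so the
 * pending-queue processing knows when to stop iterating.
 */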
1971 bool
1972 nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
1973 			  struct spdk_nvmf_rdma_request *rdma_req)
1974 {
1975 	struct spdk_nvmf_rdma_qpair	*rqpair;
1976 	struct spdk_nvmf_rdma_device	*device;
1977 	struct spdk_nvmf_rdma_poll_group *rgroup;
1978 	struct spdk_nvme_cpl		*rsp = &rdma_req->req.rsp->nvme_cpl;
1979 	int				rc;
1980 	struct spdk_nvmf_rdma_recv	*rdma_recv;
1981 	enum spdk_nvmf_rdma_request_state prev_state;
1982 	bool				progress = false;
1983 	int				data_posted;
1984 	uint32_t			num_blocks;
1985 
1986 	rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
1987 	device = rqpair->device;
1988 	rgroup = rqpair->poller->group;
1989 
1990 	assert(rdma_req->state != RDMA_REQUEST_STATE_FREE);
1991 
1992 	/* If the queue pair is in an error state, force the request to the completed state
1993 	 * to release resources. */
1994 	if (rqpair->ibv_state == IBV_QPS_ERR || rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
1995 		if (rdma_req->state == RDMA_REQUEST_STATE_NEED_BUFFER) {
1996 			STAILQ_REMOVE(&rgroup->group.pending_buf_queue, &rdma_req->req, spdk_nvmf_request, buf_link);
1997 		} else if (rdma_req->state == RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING) {
1998 			STAILQ_REMOVE(&rqpair->pending_rdma_read_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
1999 		} else if (rdma_req->state == RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING) {
2000 			STAILQ_REMOVE(&rqpair->pending_rdma_write_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
2001 		}
2002 		rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
2003 	}
2004 
2005 	/* The loop here is to allow for several back-to-back state changes. */
2006 	do {
2007 		prev_state = rdma_req->state;
2008 
2009 		SPDK_DEBUGLOG(rdma, "Request %p entering state %d\n", rdma_req, prev_state);
2010 
2011 		switch (rdma_req->state) {
2012 		case RDMA_REQUEST_STATE_FREE:
2013 			/* Some external code must kick a request into RDMA_REQUEST_STATE_NEW
2014 			 * to escape this state. */
2015 			break;
2016 		case RDMA_REQUEST_STATE_NEW:
2017 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_NEW, 0, 0,
2018 					  (uintptr_t)rdma_req, (uintptr_t)rqpair);
2019 			rdma_recv = rdma_req->recv;
2020 
2021 			/* The first element of the SGL is the NVMe command */
2022 			rdma_req->req.cmd = (union nvmf_h2c_msg *)rdma_recv->sgl[0].addr;
2023 			memset(rdma_req->req.rsp, 0, sizeof(*rdma_req->req.rsp));
2024 
2025 			if (rqpair->ibv_state == IBV_QPS_ERR || rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
2026 				rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
2027 				break;
2028 			}
2029 
2030 			if (spdk_unlikely(spdk_nvmf_request_get_dif_ctx(&rdma_req->req, &rdma_req->req.dif.dif_ctx))) {
2031 				rdma_req->req.dif_enabled = true;
2032 			}
2033 
2034 			nvmf_rdma_check_fused_ordering(rtransport, rqpair, rdma_req);
2035 
2036 #ifdef SPDK_CONFIG_RDMA_SEND_WITH_INVAL
2037 			rdma_req->rsp.wr.opcode = IBV_WR_SEND;
2038 			rdma_req->rsp.wr.imm_data = 0;
2039 #endif
2040 
2041 			/* The next state transition depends on the data transfer needs of this request. */
2042 			rdma_req->req.xfer = spdk_nvmf_req_get_xfer(&rdma_req->req);
2043 
2044 			if (spdk_unlikely(rdma_req->req.xfer == SPDK_NVME_DATA_BIDIRECTIONAL)) {
2045 				rsp->status.sct = SPDK_NVME_SCT_GENERIC;
2046 				rsp->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
2047 				rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
2048 				SPDK_DEBUGLOG(rdma, "Request %p: invalid xfer type (BIDIRECTIONAL)\n", rdma_req);
2049 				break;
2050 			}
2051 
2052 			/* If no data to transfer, ready to execute. */
2053 			if (rdma_req->req.xfer == SPDK_NVME_DATA_NONE) {
2054 				rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
2055 				break;
2056 			}
2057 
2058 			rdma_req->state = RDMA_REQUEST_STATE_NEED_BUFFER;
2059 			STAILQ_INSERT_TAIL(&rgroup->group.pending_buf_queue, &rdma_req->req, buf_link);
2060 			break;
2061 		case RDMA_REQUEST_STATE_NEED_BUFFER:
2062 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_NEED_BUFFER, 0, 0,
2063 					  (uintptr_t)rdma_req, (uintptr_t)rqpair);
2064 
2065 			assert(rdma_req->req.xfer != SPDK_NVME_DATA_NONE);
2066 
2067 			if (&rdma_req->req != STAILQ_FIRST(&rgroup->group.pending_buf_queue)) {
2068 				/* This request needs to wait in line to obtain a buffer */
2069 				break;
2070 			}
2071 
2072 			/* Try to get a data buffer */
2073 			rc = nvmf_rdma_request_parse_sgl(rtransport, device, rdma_req);
2074 			if (rc < 0) {
2075 				STAILQ_REMOVE_HEAD(&rgroup->group.pending_buf_queue, buf_link);
2076 				rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
2077 				break;
2078 			}
2079 
2080 			if (!rdma_req->req.data) {
2081 				/* No buffers available. */
2082 				rgroup->stat.pending_data_buffer++;
2083 				break;
2084 			}
2085 
2086 			STAILQ_REMOVE_HEAD(&rgroup->group.pending_buf_queue, buf_link);
2087 
2088 			/* If data is transferring from host to controller and the data didn't
2089 			 * arrive using in-capsule data, we need to do a transfer from the host.
2090 			 */
2091 			if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER &&
2092 			    rdma_req->req.data_from_pool) {
2093 				STAILQ_INSERT_TAIL(&rqpair->pending_rdma_read_queue, rdma_req, state_link);
2094 				rdma_req->state = RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING;
2095 				break;
2096 			}
2097 
2098 			rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
2099 			break;
2100 		case RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING:
2101 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING, 0, 0,
2102 					  (uintptr_t)rdma_req, (uintptr_t)rqpair);
2103 
2104 			if (rdma_req != STAILQ_FIRST(&rqpair->pending_rdma_read_queue)) {
2105 				/* This request needs to wait in line to perform RDMA */
2106 				break;
2107 			}
2108 			if (rqpair->current_send_depth + rdma_req->num_outstanding_data_wr > rqpair->max_send_depth
2109 			    || rqpair->current_read_depth + rdma_req->num_outstanding_data_wr > rqpair->max_read_depth) {
2110 				/* We can only have so many WRs outstanding. We have to wait until some finish. */
2111 				rqpair->poller->stat.pending_rdma_read++;
2112 				break;
2113 			}
2114 
2115 			/* We have already verified that this request is the head of the queue. */
2116 			STAILQ_REMOVE_HEAD(&rqpair->pending_rdma_read_queue, state_link);
2117 
2118 			rc = request_transfer_in(&rdma_req->req);
2119 			if (!rc) {
2120 				rdma_req->state = RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;
2121 			} else {
2122 				rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
2123 				rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
2124 			}
2125 			break;
2126 		case RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER:
2127 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER, 0, 0,
2128 					  (uintptr_t)rdma_req, (uintptr_t)rqpair);
2129 			/* Some external code must kick a request into RDMA_REQUEST_STATE_READY_TO_EXECUTE
2130 			 * to escape this state. */
2131 			break;
2132 		case RDMA_REQUEST_STATE_READY_TO_EXECUTE:
2133 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_READY_TO_EXECUTE, 0, 0,
2134 					  (uintptr_t)rdma_req, (uintptr_t)rqpair);
2135 
2136 			if (spdk_unlikely(rdma_req->req.dif_enabled)) {
2137 				if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
2138 					/* generate DIF for write operation */
2139 					num_blocks = SPDK_CEIL_DIV(rdma_req->req.dif.elba_length, rdma_req->req.dif.dif_ctx.block_size);
2140 					assert(num_blocks > 0);
2141 
2142 					rc = spdk_dif_generate(rdma_req->req.iov, rdma_req->req.iovcnt,
2143 							       num_blocks, &rdma_req->req.dif.dif_ctx);
2144 					if (rc != 0) {
2145 						SPDK_ERRLOG("DIF generation failed\n");
2146 						rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
2147 						spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
2148 						break;
2149 					}
2150 				}
2151 
2152 				assert(rdma_req->req.dif.elba_length >= rdma_req->req.length);
2153 				/* set extended length before IO operation */
2154 				rdma_req->req.length = rdma_req->req.dif.elba_length;
2155 			}
2156 
2157 			if (rdma_req->req.cmd->nvme_cmd.fuse != SPDK_NVME_CMD_FUSE_NONE) {
2158 				if (rdma_req->fused_failed) {
2159 					/* This request failed FUSED semantics.  Fail it immediately, without
2160 					 * even sending it to the target layer.
2161 					 */
2162 					rsp->status.sct = SPDK_NVME_SCT_GENERIC;
2163 					rsp->status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
2164 					rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
2165 					break;
2166 				}
2167 
2168 				if (rdma_req->fused_pair == NULL ||
2169 				    rdma_req->fused_pair->state != RDMA_REQUEST_STATE_READY_TO_EXECUTE) {
2170 					/* This request is ready to execute, but either we don't know yet whether it's
2171 					 * valid (i.e. this is a FIRST but we haven't received the next request yet),
2172 					 * or the other request of this fused pair isn't ready to execute. So break
2173 					 * here; this request will get processed later, either when the other request
2174 					 * is ready or when we find that this request isn't valid.
2175 					 */
2176 					break;
2177 				}
2178 			}
2179 
2180 			/* If we get to this point, and this request is a fused command, we know that
2181 			 * it is part of a valid sequence (FIRST followed by a SECOND) and that both
2182 			 * requests are READY_TO_EXECUTE. So call spdk_nvmf_request_exec() both on this
2183 			 * request, and the other request of the fused pair, in the correct order.
2184 			 * Also clear the ->fused_pair pointers on both requests, since after this point
2185 			 * we no longer need to maintain the relationship between these two requests.
2186 			 */
2187 			if (rdma_req->req.cmd->nvme_cmd.fuse == SPDK_NVME_CMD_FUSE_SECOND) {
2188 				assert(rdma_req->fused_pair != NULL);
2189 				assert(rdma_req->fused_pair->fused_pair != NULL);
2190 				rdma_req->fused_pair->state = RDMA_REQUEST_STATE_EXECUTING;
2191 				spdk_nvmf_request_exec(&rdma_req->fused_pair->req);
2192 				rdma_req->fused_pair->fused_pair = NULL;
2193 				rdma_req->fused_pair = NULL;
2194 			}
2195 			rdma_req->state = RDMA_REQUEST_STATE_EXECUTING;
2196 			spdk_nvmf_request_exec(&rdma_req->req);
2197 			if (rdma_req->req.cmd->nvme_cmd.fuse == SPDK_NVME_CMD_FUSE_FIRST) {
2198 				assert(rdma_req->fused_pair != NULL);
2199 				assert(rdma_req->fused_pair->fused_pair != NULL);
2200 				rdma_req->fused_pair->state = RDMA_REQUEST_STATE_EXECUTING;
2201 				spdk_nvmf_request_exec(&rdma_req->fused_pair->req);
2202 				rdma_req->fused_pair->fused_pair = NULL;
2203 				rdma_req->fused_pair = NULL;
2204 			}
2205 			break;
2206 		case RDMA_REQUEST_STATE_EXECUTING:
2207 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_EXECUTING, 0, 0,
2208 					  (uintptr_t)rdma_req, (uintptr_t)rqpair);
2209 			/* Some external code must kick a request into RDMA_REQUEST_STATE_EXECUTED
2210 			 * to escape this state. */
2211 			break;
2212 		case RDMA_REQUEST_STATE_EXECUTED:
2213 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_EXECUTED, 0, 0,
2214 					  (uintptr_t)rdma_req, (uintptr_t)rqpair);
2215 			if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
2216 			    rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
2217 				STAILQ_INSERT_TAIL(&rqpair->pending_rdma_write_queue, rdma_req, state_link);
2218 				rdma_req->state = RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING;
2219 			} else {
2220 				rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
2221 			}
2222 			if (spdk_unlikely(rdma_req->req.dif_enabled)) {
2223 				/* restore the original length */
2224 				rdma_req->req.length = rdma_req->req.dif.orig_length;
2225 
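				/* For controller-to-host transfers, verify the protection information before any
				 * RDMA WRITE is posted; on a mismatch the request is failed with a media error
				 * status and pulled off the pending write queue.
				 */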
2226 				if (rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
2227 					struct spdk_dif_error error_blk;
2228 
2229 					num_blocks = SPDK_CEIL_DIV(rdma_req->req.dif.elba_length, rdma_req->req.dif.dif_ctx.block_size);
2230 					if (!rdma_req->req.stripped_data) {
2231 						rc = spdk_dif_verify(rdma_req->req.iov, rdma_req->req.iovcnt, num_blocks,
2232 								     &rdma_req->req.dif.dif_ctx, &error_blk);
2233 					} else {
2234 						rc = spdk_dif_verify_copy(rdma_req->req.stripped_data->iov,
2235 									  rdma_req->req.stripped_data->iovcnt,
2236 									  rdma_req->req.iov, rdma_req->req.iovcnt, num_blocks,
2237 									  &rdma_req->req.dif.dif_ctx, &error_blk);
2238 					}
2239 					if (rc) {
2240 						struct spdk_nvme_cpl *rsp = &rdma_req->req.rsp->nvme_cpl;
2241 
2242 						SPDK_ERRLOG("DIF error detected. type=%d, offset=%" PRIu32 "\n", error_blk.err_type,
2243 							    error_blk.err_offset);
2244 						rsp->status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
2245 						rsp->status.sc = nvmf_rdma_dif_error_to_compl_status(error_blk.err_type);
2246 						rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
2247 						STAILQ_REMOVE(&rqpair->pending_rdma_write_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
2248 					}
2249 				}
2250 			}
2251 			break;
2252 		case RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING:
2253 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING, 0, 0,
2254 					  (uintptr_t)rdma_req, (uintptr_t)rqpair);
2255 
2256 			if (rdma_req != STAILQ_FIRST(&rqpair->pending_rdma_write_queue)) {
2257 				/* This request needs to wait in line to perform RDMA */
2258 				break;
2259 			}
2260 			if ((rqpair->current_send_depth + rdma_req->num_outstanding_data_wr + 1) >
2261 			    rqpair->max_send_depth) {
2262 				/* We can only have so many WRs outstanding. We have to wait until some finish.
2263 				 * +1 since each request has an additional WR for the response. */
2264 				rqpair->poller->stat.pending_rdma_write++;
2265 				break;
2266 			}
2267 
2268 			/* We have already verified that this request is the head of the queue. */
2269 			STAILQ_REMOVE_HEAD(&rqpair->pending_rdma_write_queue, state_link);
2270 
2271 			/* The data transfer will be kicked off from
2272 			 * RDMA_REQUEST_STATE_READY_TO_COMPLETE state.
2273 			 */
2274 			rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
2275 			break;
2276 		case RDMA_REQUEST_STATE_READY_TO_COMPLETE:
2277 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_READY_TO_COMPLETE, 0, 0,
2278 					  (uintptr_t)rdma_req, (uintptr_t)rqpair);
2279 			rc = request_transfer_out(&rdma_req->req, &data_posted);
2280 			assert(rc == 0); /* No good way to handle this currently */
2281 			if (rc) {
2282 				rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
2283 			} else {
2284 				rdma_req->state = data_posted ? RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST :
2285 						  RDMA_REQUEST_STATE_COMPLETING;
2286 			}
2287 			break;
2288 		case RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST:
2289 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST, 0, 0,
2290 					  (uintptr_t)rdma_req, (uintptr_t)rqpair);
2291 			/* Some external code must kick a request into RDMA_REQUEST_STATE_COMPLETED
2292 			 * to escape this state. */
2293 			break;
2294 		case RDMA_REQUEST_STATE_COMPLETING:
2295 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_COMPLETING, 0, 0,
2296 					  (uintptr_t)rdma_req, (uintptr_t)rqpair);
2297 			/* Some external code must kick a request into RDMA_REQUEST_STATE_COMPLETED
2298 			 * to escape this state. */
2299 			break;
2300 		case RDMA_REQUEST_STATE_COMPLETED:
2301 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_COMPLETED, 0, 0,
2302 					  (uintptr_t)rdma_req, (uintptr_t)rqpair);
2303 
2304 			rqpair->poller->stat.request_latency += spdk_get_ticks() - rdma_req->receive_tsc;
2305 			_nvmf_rdma_request_free(rdma_req, rtransport);
2306 			break;
2307 		case RDMA_REQUEST_NUM_STATES:
2308 		default:
2309 			assert(0);
2310 			break;
2311 		}
2312 
2313 		if (rdma_req->state != prev_state) {
2314 			progress = true;
2315 		}
2316 	} while (rdma_req->state != prev_state);
2317 
2318 	return progress;
2319 }
2320 
2321 /* Public API callbacks begin here */
2322 
2323 #define SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH 128
2324 #define SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH 128
2325 #define SPDK_NVMF_RDMA_DEFAULT_SRQ_DEPTH 4096
2326 #define SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR 128
2327 #define SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE 4096
2328 #define SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE 131072
2329 #define SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE (SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE / SPDK_NVMF_MAX_SGL_ENTRIES)
2330 #define SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS 4095
2331 #define SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE 32
2332 #define SPDK_NVMF_RDMA_DEFAULT_NO_SRQ false
2333 #define SPDK_NVMF_RDMA_DIF_INSERT_OR_STRIP false
2334 #define SPDK_NVMF_RDMA_ACCEPTOR_BACKLOG 100
2335 #define SPDK_NVMF_RDMA_DEFAULT_ABORT_TIMEOUT_SEC 1
2336 #define SPDK_NVMF_RDMA_DEFAULT_NO_WR_BATCHING false
2337 
2338 static void
2339 nvmf_rdma_opts_init(struct spdk_nvmf_transport_opts *opts)
2340 {
2341 	opts->max_queue_depth =		SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH;
2342 	opts->max_qpairs_per_ctrlr =	SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR;
2343 	opts->in_capsule_data_size =	SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE;
2344 	opts->max_io_size =		SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE;
2345 	opts->io_unit_size =		SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE;
2346 	opts->max_aq_depth =		SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH;
2347 	opts->num_shared_buffers =	SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS;
2348 	opts->buf_cache_size =		SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE;
2349 	opts->dif_insert_or_strip =	SPDK_NVMF_RDMA_DIF_INSERT_OR_STRIP;
2350 	opts->abort_timeout_sec =	SPDK_NVMF_RDMA_DEFAULT_ABORT_TIMEOUT_SEC;
2351 	opts->transport_specific =      NULL;
2352 }
2353 
2354 static int nvmf_rdma_destroy(struct spdk_nvmf_transport *transport,
2355 			     spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg);
2356 
2357 static inline bool
2358 nvmf_rdma_is_rxe_device(struct spdk_nvmf_rdma_device *device)
2359 {
2360 	return device->attr.vendor_id == SPDK_RDMA_RXE_VENDOR_ID_OLD ||
2361 	       device->attr.vendor_id == SPDK_RDMA_RXE_VENDOR_ID_NEW;
2362 }
2363 
2364 static int nvmf_rdma_accept(void *ctx);
2365 static int
2366 create_ib_device(struct spdk_nvmf_rdma_transport *rtransport, struct ibv_context *context,
2367 		 struct spdk_nvmf_rdma_device **new_device)
2368 {
2369 	struct spdk_nvmf_rdma_device	*device;
2370 	int				flag = 0;
2371 	int				rc = 0;
2372 
2373 	device = calloc(1, sizeof(*device));
2374 	if (!device) {
2375 		SPDK_ERRLOG("Unable to allocate memory for RDMA devices.\n");
2376 		return -ENOMEM;
2377 	}
2378 	device->context = context;
2379 	rc = ibv_query_device(device->context, &device->attr);
2380 	if (rc < 0) {
2381 		SPDK_ERRLOG("Failed to query RDMA device attributes.\n");
2382 		free(device);
2383 		return rc;
2384 	}
2385 
2386 #ifdef SPDK_CONFIG_RDMA_SEND_WITH_INVAL
2387 	if ((device->attr.device_cap_flags & IBV_DEVICE_MEM_MGT_EXTENSIONS) == 0) {
2388 		SPDK_WARNLOG("The libibverbs on this system supports SEND_WITH_INVALIDATE, "
2389 			     "but the device with vendor ID %u does not.\n", device->attr.vendor_id);
2390 	}
2391 
2392 	/**
2393 	 * The vendor ID is assigned by the IEEE and an ID of 0 implies Soft-RoCE.
2394 	 * The Soft-RoCE RXE driver does not currently support send with invalidate,
2395 	 * but incorrectly reports that it does. There are changes making their way
2396 	 * through the kernel now that will enable this feature. When they are merged,
2397 	 * we can conditionally enable this feature.
2398 	 *
2399 	 * TODO: enable this for versions of the kernel rxe driver that support it.
2400 	 */
2401 	if (nvmf_rdma_is_rxe_device(device)) {
2402 		device->attr.device_cap_flags &= ~(IBV_DEVICE_MEM_MGT_EXTENSIONS);
2403 	}
2404 #endif
2405 
2406 	/* set up device context async ev fd as NON_BLOCKING */
2407 	flag = fcntl(device->context->async_fd, F_GETFL);
2408 	rc = fcntl(device->context->async_fd, F_SETFL, flag | O_NONBLOCK);
2409 	if (rc < 0) {
2410 		SPDK_ERRLOG("Failed to set context async fd to NONBLOCK.\n");
2411 		free(device);
2412 		return rc;
2413 	}
2414 
2415 	TAILQ_INSERT_TAIL(&rtransport->devices, device, link);
2416 	SPDK_DEBUGLOG(rdma, "New device %p is added to RDMA transport\n", device);
2417 
2418 	if (g_nvmf_hooks.get_ibv_pd) {
2419 		device->pd = g_nvmf_hooks.get_ibv_pd(NULL, device->context);
2420 	} else {
2421 		device->pd = ibv_alloc_pd(device->context);
2422 	}
2423 
2424 	if (!device->pd) {
2425 		SPDK_ERRLOG("Unable to allocate protection domain.\n");
2426 		return -ENOMEM;
2427 	}
2428 
2429 	assert(device->map == NULL);
2430 
2431 	device->map = spdk_rdma_create_mem_map(device->pd, &g_nvmf_hooks, SPDK_RDMA_MEMORY_MAP_ROLE_TARGET);
2432 	if (!device->map) {
2433 		SPDK_ERRLOG("Unable to allocate memory map for listen address\n");
2434 		return -ENOMEM;
2435 	}
2436 
2437 	assert(device->map != NULL);
2438 	assert(device->pd != NULL);
2439 
2440 	if (new_device) {
2441 		*new_device = device;
2442 	}
2443 	return 0;
2444 }
2445 
2446 static void
2447 free_poll_fds(struct spdk_nvmf_rdma_transport *rtransport)
2448 {
2449 	if (rtransport->poll_fds) {
2450 		free(rtransport->poll_fds);
2451 		rtransport->poll_fds = NULL;
2452 	}
2453 	rtransport->npoll_fds = 0;
2454 }
2455 
2456 static int
2457 generate_poll_fds(struct spdk_nvmf_rdma_transport *rtransport)
2458 {
2459 	/* Set up poll descriptor array to monitor events from RDMA and IB
2460 	 * in a single poll syscall
2461 	 */
2462 	int device_count = 0;
2463 	int i = 0;
2464 	struct spdk_nvmf_rdma_device *device, *tmp;
2465 
2466 	TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) {
2467 		device_count++;
2468 	}
2469 
2470 	rtransport->npoll_fds = device_count + 1;
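	/* poll_fds[0] is the RDMA CM event channel; the remaining entries are the per-device IB
	 * async event fds, one per discovered device.
	 */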
2471 
2472 	rtransport->poll_fds = calloc(rtransport->npoll_fds, sizeof(struct pollfd));
2473 	if (rtransport->poll_fds == NULL) {
2474 		SPDK_ERRLOG("poll_fds allocation failed\n");
2475 		return -ENOMEM;
2476 	}
2477 
2478 	rtransport->poll_fds[i].fd = rtransport->event_channel->fd;
2479 	rtransport->poll_fds[i++].events = POLLIN;
2480 
2481 	TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) {
2482 		rtransport->poll_fds[i].fd = device->context->async_fd;
2483 		rtransport->poll_fds[i++].events = POLLIN;
2484 	}
2485 
2486 	return 0;
2487 }
2488 
2489 static struct spdk_nvmf_transport *
2490 nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
2491 {
2492 	int rc;
2493 	struct spdk_nvmf_rdma_transport *rtransport;
2494 	struct spdk_nvmf_rdma_device	*device;
2495 	struct ibv_context		**contexts;
2496 	uint32_t			i;
2497 	int				flag;
2498 	uint32_t			sge_count;
2499 	uint32_t			min_shared_buffers;
2500 	uint32_t			min_in_capsule_data_size;
2501 	int				max_device_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
2502 
2503 	rtransport = calloc(1, sizeof(*rtransport));
2504 	if (!rtransport) {
2505 		return NULL;
2506 	}
2507 
2508 	TAILQ_INIT(&rtransport->devices);
2509 	TAILQ_INIT(&rtransport->ports);
2510 	TAILQ_INIT(&rtransport->poll_groups);
2511 
2512 	rtransport->transport.ops = &spdk_nvmf_transport_rdma;
2513 	rtransport->rdma_opts.num_cqe = DEFAULT_NVMF_RDMA_CQ_SIZE;
2514 	rtransport->rdma_opts.max_srq_depth = SPDK_NVMF_RDMA_DEFAULT_SRQ_DEPTH;
2515 	rtransport->rdma_opts.no_srq = SPDK_NVMF_RDMA_DEFAULT_NO_SRQ;
2516 	rtransport->rdma_opts.acceptor_backlog = SPDK_NVMF_RDMA_ACCEPTOR_BACKLOG;
2517 	rtransport->rdma_opts.no_wr_batching = SPDK_NVMF_RDMA_DEFAULT_NO_WR_BATCHING;
2518 	if (opts->transport_specific != NULL &&
2519 	    spdk_json_decode_object_relaxed(opts->transport_specific, rdma_transport_opts_decoder,
2520 					    SPDK_COUNTOF(rdma_transport_opts_decoder),
2521 					    &rtransport->rdma_opts)) {
2522 		SPDK_ERRLOG("spdk_json_decode_object_relaxed failed\n");
2523 		nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
2524 		return NULL;
2525 	}
2526 
2527 	SPDK_INFOLOG(rdma, "*** RDMA Transport Init ***\n"
2528 		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
2529 		     "  max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
2530 		     "  in_capsule_data_size=%d, max_aq_depth=%d,\n"
2531 		     "  num_shared_buffers=%d, num_cqe=%d, max_srq_depth=%d, no_srq=%d,"
2532 		     "  acceptor_backlog=%d, no_wr_batching=%d abort_timeout_sec=%d\n",
2533 		     opts->max_queue_depth,
2534 		     opts->max_io_size,
2535 		     opts->max_qpairs_per_ctrlr - 1,
2536 		     opts->io_unit_size,
2537 		     opts->in_capsule_data_size,
2538 		     opts->max_aq_depth,
2539 		     opts->num_shared_buffers,
2540 		     rtransport->rdma_opts.num_cqe,
2541 		     rtransport->rdma_opts.max_srq_depth,
2542 		     rtransport->rdma_opts.no_srq,
2543 		     rtransport->rdma_opts.acceptor_backlog,
2544 		     rtransport->rdma_opts.no_wr_batching,
2545 		     opts->abort_timeout_sec);
2546 
2547 	/* I/O unit size cannot be larger than max I/O size */
2548 	if (opts->io_unit_size > opts->max_io_size) {
2549 		opts->io_unit_size = opts->max_io_size;
2550 	}
2551 
2552 	if (rtransport->rdma_opts.acceptor_backlog <= 0) {
2553 		SPDK_ERRLOG("The acceptor backlog cannot be less than 1, setting it to the default value of %d.\n",
2554 			    SPDK_NVMF_RDMA_ACCEPTOR_BACKLOG);
2555 		rtransport->rdma_opts.acceptor_backlog = SPDK_NVMF_RDMA_ACCEPTOR_BACKLOG;
2556 	}
2557 
2558 	if (opts->num_shared_buffers < (SPDK_NVMF_MAX_SGL_ENTRIES * 2)) {
2559 		SPDK_ERRLOG("The number of shared data buffers (%d) is less than "
2560 			    "the minimum number required to guarantee that forward progress can be made (%d)\n",
2561 			    opts->num_shared_buffers, (SPDK_NVMF_MAX_SGL_ENTRIES * 2));
2562 		nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
2563 		return NULL;
2564 	}
2565 
2566 	min_shared_buffers = spdk_env_get_core_count() * opts->buf_cache_size;
2567 	if (min_shared_buffers > opts->num_shared_buffers) {
2568 		SPDK_ERRLOG("There are not enough buffers to satisfy "
2569 			    "per-poll group caches for each thread: (%" PRIu32 ") "
2570 			    "supplied, (%" PRIu32 ") required\n", opts->num_shared_buffers, min_shared_buffers);
2571 		SPDK_ERRLOG("Please specify a larger number of shared buffers\n");
2572 		nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
2573 		return NULL;
2574 	}
2575 
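	/* Illustrative numbers, assuming SPDK_NVMF_MAX_SGL_ENTRIES is 16: with the defaults above,
	 * max_io_size 131072 and io_unit_size 8192 give 131072 / 8192 = 16 SGEs, which is exactly
	 * NVMF_DEFAULT_TX_SGE, so the check below passes.
	 */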
2576 	sge_count = opts->max_io_size / opts->io_unit_size;
2577 	if (sge_count > NVMF_DEFAULT_TX_SGE) {
2578 		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
2579 		nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
2580 		return NULL;
2581 	}
2582 
2583 	min_in_capsule_data_size = sizeof(struct spdk_nvme_sgl_descriptor) * SPDK_NVMF_MAX_SGL_ENTRIES;
2584 	if (opts->in_capsule_data_size < min_in_capsule_data_size) {
2585 		SPDK_WARNLOG("In-capsule data size is set to %u, the minimum size required to support msdbd=16\n",
2586 			     min_in_capsule_data_size);
2587 		opts->in_capsule_data_size = min_in_capsule_data_size;
2588 	}
2589 
2590 	rtransport->event_channel = rdma_create_event_channel();
2591 	if (rtransport->event_channel == NULL) {
2592 		SPDK_ERRLOG("rdma_create_event_channel() failed, %s\n", spdk_strerror(errno));
2593 		nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
2594 		return NULL;
2595 	}
2596 
2597 	flag = fcntl(rtransport->event_channel->fd, F_GETFL);
2598 	if (fcntl(rtransport->event_channel->fd, F_SETFL, flag | O_NONBLOCK) < 0) {
2599 		SPDK_ERRLOG("fcntl can't set nonblocking mode for socket, fd: %d (%s)\n",
2600 			    rtransport->event_channel->fd, spdk_strerror(errno));
2601 		nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
2602 		return NULL;
2603 	}
2604 
2605 	rtransport->data_wr_pool = spdk_mempool_create("spdk_nvmf_rdma_wr_data",
2606 				   opts->max_queue_depth * SPDK_NVMF_MAX_SGL_ENTRIES,
2607 				   sizeof(struct spdk_nvmf_rdma_request_data),
2608 				   SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
2609 				   SPDK_ENV_SOCKET_ID_ANY);
2610 	if (!rtransport->data_wr_pool) {
2611 		if (spdk_mempool_lookup("spdk_nvmf_rdma_wr_data") != NULL) {
2612 			SPDK_ERRLOG("Unable to allocate work request pool for poll group: already exists\n");
2613 			SPDK_ERRLOG("Probably running in a multiprocess environment, which is "
2614 				    "unsupported by the nvmf library\n");
2615 		} else {
2616 			SPDK_ERRLOG("Unable to allocate work request pool for poll group\n");
2617 		}
2618 		nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
2619 		return NULL;
2620 	}
2621 
2622 	contexts = rdma_get_devices(NULL);
2623 	if (contexts == NULL) {
2624 		SPDK_ERRLOG("rdma_get_devices() failed: %s (%d)\n", spdk_strerror(errno), errno);
2625 		nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
2626 		return NULL;
2627 	}
2628 
2629 	i = 0;
2630 	rc = 0;
2631 	while (contexts[i] != NULL) {
2632 		rc = create_ib_device(rtransport, contexts[i], &device);
2633 		if (rc < 0) {
2634 			break;
2635 		}
2636 		i++;
2637 		max_device_sge = spdk_min(max_device_sge, device->attr.max_sge);
2638 	}
2639 	rdma_free_devices(contexts);
2640 
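	/* Illustrative example (assuming a device that reports max_sge = 30): 131072 / 30 rounds up
	 * to 4370 bytes, which the alignment step below rounds up to 8192, so 30 SGEs of 8192 bytes
	 * still cover a full 131072-byte I/O.
	 */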
2641 	if (opts->io_unit_size * max_device_sge < opts->max_io_size) {
2642 		/* divide and round up. */
2643 		opts->io_unit_size = (opts->max_io_size + max_device_sge - 1) / max_device_sge;
2644 
2645 		/* round up to the nearest 4k. */
2646 		opts->io_unit_size = (opts->io_unit_size + NVMF_DATA_BUFFER_ALIGNMENT - 1) & ~NVMF_DATA_BUFFER_MASK;
2647 
2648 		opts->io_unit_size = spdk_max(opts->io_unit_size, SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE);
2649 		SPDK_NOTICELOG("Adjusting the io unit size to fit the device's maximum I/O size. New I/O unit size %u\n",
2650 			       opts->io_unit_size);
2651 	}
2652 
2653 	if (rc < 0) {
2654 		nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
2655 		return NULL;
2656 	}
2657 
2658 	rc = generate_poll_fds(rtransport);
2659 	if (rc < 0) {
2660 		nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
2661 		return NULL;
2662 	}
2663 
2664 	rtransport->accept_poller = SPDK_POLLER_REGISTER(nvmf_rdma_accept, &rtransport->transport,
2665 				    opts->acceptor_poll_rate);
2666 	if (!rtransport->accept_poller) {
2667 		nvmf_rdma_destroy(&rtransport->transport, NULL, NULL);
2668 		return NULL;
2669 	}
2670 
2671 	return &rtransport->transport;
2672 }
2673 
2674 static void
2675 destroy_ib_device(struct spdk_nvmf_rdma_transport *rtransport,
2676 		  struct spdk_nvmf_rdma_device *device)
2677 {
2678 	TAILQ_REMOVE(&rtransport->devices, device, link);
2679 	spdk_rdma_free_mem_map(&device->map);
2680 	if (device->pd) {
2681 		if (!g_nvmf_hooks.get_ibv_pd) {
2682 			ibv_dealloc_pd(device->pd);
2683 		}
2684 	}
2685 	free(device);
2686 }
2687 
2688 static void
2689 nvmf_rdma_dump_opts(struct spdk_nvmf_transport *transport, struct spdk_json_write_ctx *w)
2690 {
2691 	struct spdk_nvmf_rdma_transport	*rtransport;
2692 	assert(w != NULL);
2693 
2694 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
2695 	spdk_json_write_named_uint32(w, "max_srq_depth", rtransport->rdma_opts.max_srq_depth);
2696 	spdk_json_write_named_bool(w, "no_srq", rtransport->rdma_opts.no_srq);
2697 	if (rtransport->rdma_opts.no_srq == true) {
2698 		spdk_json_write_named_int32(w, "num_cqe", rtransport->rdma_opts.num_cqe);
2699 	}
2700 	spdk_json_write_named_int32(w, "acceptor_backlog", rtransport->rdma_opts.acceptor_backlog);
2701 	spdk_json_write_named_bool(w, "no_wr_batching", rtransport->rdma_opts.no_wr_batching);
2702 }
2703 
2704 static int
2705 nvmf_rdma_destroy(struct spdk_nvmf_transport *transport,
2706 		  spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
2707 {
2708 	struct spdk_nvmf_rdma_transport	*rtransport;
2709 	struct spdk_nvmf_rdma_port	*port, *port_tmp;
2710 	struct spdk_nvmf_rdma_device	*device, *device_tmp;
2711 
2712 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
2713 
2714 	TAILQ_FOREACH_SAFE(port, &rtransport->ports, link, port_tmp) {
2715 		TAILQ_REMOVE(&rtransport->ports, port, link);
2716 		rdma_destroy_id(port->id);
2717 		free(port);
2718 	}
2719 
2720 	free_poll_fds(rtransport);
2721 
2722 	if (rtransport->event_channel != NULL) {
2723 		rdma_destroy_event_channel(rtransport->event_channel);
2724 	}
2725 
2726 	TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, device_tmp) {
2727 		destroy_ib_device(rtransport, device);
2728 	}
2729 
2730 	if (rtransport->data_wr_pool != NULL) {
2731 		if (spdk_mempool_count(rtransport->data_wr_pool) !=
2732 		    (transport->opts.max_queue_depth * SPDK_NVMF_MAX_SGL_ENTRIES)) {
2733 			SPDK_ERRLOG("transport wr pool count is %zu but should be %u\n",
2734 				    spdk_mempool_count(rtransport->data_wr_pool),
2735 				    transport->opts.max_queue_depth * SPDK_NVMF_MAX_SGL_ENTRIES);
2736 		}
2737 	}
2738 
2739 	spdk_mempool_free(rtransport->data_wr_pool);
2740 
2741 	spdk_poller_unregister(&rtransport->accept_poller);
2742 	free(rtransport);
2743 
2744 	if (cb_fn) {
2745 		cb_fn(cb_arg);
2746 	}
2747 	return 0;
2748 }
2749 
2750 static int nvmf_rdma_trid_from_cm_id(struct rdma_cm_id *id,
2751 				     struct spdk_nvme_transport_id *trid,
2752 				     bool peer);
2753 
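/* Start listening on a transport ID: resolve the address with getaddrinfo(), create an rdma_cm id
 * on the shared event channel, bind and listen, then associate the new port with the device whose
 * verbs context the CM id landed on.
 */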
2754 static int
2755 nvmf_rdma_listen(struct spdk_nvmf_transport *transport, const struct spdk_nvme_transport_id *trid,
2756 		 struct spdk_nvmf_listen_opts *listen_opts)
2757 {
2758 	struct spdk_nvmf_rdma_transport	*rtransport;
2759 	struct spdk_nvmf_rdma_device	*device;
2760 	struct spdk_nvmf_rdma_port	*port;
2761 	struct addrinfo			*res;
2762 	struct addrinfo			hints;
2763 	int				family;
2764 	int				rc;
2765 
2766 	if (!strlen(trid->trsvcid)) {
2767 		SPDK_ERRLOG("Service id is required\n");
2768 		return -EINVAL;
2769 	}
2770 
2771 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
2772 	assert(rtransport->event_channel != NULL);
2773 
2774 	port = calloc(1, sizeof(*port));
2775 	if (!port) {
2776 		SPDK_ERRLOG("Port allocation failed\n");
2777 		return -ENOMEM;
2778 	}
2779 
2780 	port->trid = trid;
2781 
2782 	switch (trid->adrfam) {
2783 	case SPDK_NVMF_ADRFAM_IPV4:
2784 		family = AF_INET;
2785 		break;
2786 	case SPDK_NVMF_ADRFAM_IPV6:
2787 		family = AF_INET6;
2788 		break;
2789 	default:
2790 		SPDK_ERRLOG("Unhandled ADRFAM %d\n", trid->adrfam);
2791 		free(port);
2792 		return -EINVAL;
2793 	}
2794 
2795 	memset(&hints, 0, sizeof(hints));
2796 	hints.ai_family = family;
2797 	hints.ai_flags = AI_NUMERICSERV;
2798 	hints.ai_socktype = SOCK_STREAM;
2799 	hints.ai_protocol = 0;
2800 
2801 	rc = getaddrinfo(trid->traddr, trid->trsvcid, &hints, &res);
2802 	if (rc) {
2803 		SPDK_ERRLOG("getaddrinfo failed: %s (%d)\n", gai_strerror(rc), rc);
2804 		free(port);
2805 		return -EINVAL;
2806 	}
2807 
2808 	rc = rdma_create_id(rtransport->event_channel, &port->id, port, RDMA_PS_TCP);
2809 	if (rc < 0) {
2810 		SPDK_ERRLOG("rdma_create_id() failed\n");
2811 		freeaddrinfo(res);
2812 		free(port);
2813 		return rc;
2814 	}
2815 
2816 	rc = rdma_bind_addr(port->id, res->ai_addr);
2817 	freeaddrinfo(res);
2818 
2819 	if (rc < 0) {
2820 		SPDK_ERRLOG("rdma_bind_addr() failed\n");
2821 		rdma_destroy_id(port->id);
2822 		free(port);
2823 		return rc;
2824 	}
2825 
2826 	if (!port->id->verbs) {
2827 		SPDK_ERRLOG("ibv_context is null\n");
2828 		rdma_destroy_id(port->id);
2829 		free(port);
2830 		return -1;
2831 	}
2832 
2833 	rc = rdma_listen(port->id, rtransport->rdma_opts.acceptor_backlog);
2834 	if (rc < 0) {
2835 		SPDK_ERRLOG("rdma_listen() failed\n");
2836 		rdma_destroy_id(port->id);
2837 		free(port);
2838 		return rc;
2839 	}
2840 
2841 	TAILQ_FOREACH(device, &rtransport->devices, link) {
2842 		if (device->context == port->id->verbs) {
2843 			port->device = device;
2844 			break;
2845 		}
2846 	}
2847 	if (!port->device) {
2848 		SPDK_ERRLOG("Listening on verbs %p, but unable to find a corresponding device.\n",
2849 			    port->id->verbs);
2850 		rdma_destroy_id(port->id);
2851 		free(port);
2852 		return -EINVAL;
2853 	}
2854 
2855 	SPDK_NOTICELOG("*** NVMe/RDMA Target Listening on %s port %s ***\n",
2856 		       trid->traddr, trid->trsvcid);
2857 
2858 	TAILQ_INSERT_TAIL(&rtransport->ports, port, link);
2859 	return 0;
2860 }
2861 
2862 static void
2863 nvmf_rdma_stop_listen(struct spdk_nvmf_transport *transport,
2864 		      const struct spdk_nvme_transport_id *trid)
2865 {
2866 	struct spdk_nvmf_rdma_transport *rtransport;
2867 	struct spdk_nvmf_rdma_port *port, *tmp;
2868 
2869 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
2870 
2871 	TAILQ_FOREACH_SAFE(port, &rtransport->ports, link, tmp) {
2872 		if (spdk_nvme_transport_id_compare(port->trid, trid) == 0) {
2873 			TAILQ_REMOVE(&rtransport->ports, port, link);
2874 			rdma_destroy_id(port->id);
2875 			free(port);
2876 			break;
2877 		}
2878 	}
2879 }
2880 
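/* Retry work that was previously blocked on this qpair, in priority order: pending RDMA reads,
 * pending RDMA writes, requests waiting for data buffers, and finally new incoming commands that
 * are waiting for a free request slot. With drain set, keep going even when a request makes no
 * progress.
 */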
2881 static void
2882 nvmf_rdma_qpair_process_pending(struct spdk_nvmf_rdma_transport *rtransport,
2883 				struct spdk_nvmf_rdma_qpair *rqpair, bool drain)
2884 {
2885 	struct spdk_nvmf_request *req, *tmp;
2886 	struct spdk_nvmf_rdma_request	*rdma_req, *req_tmp;
2887 	struct spdk_nvmf_rdma_resources *resources;
2888 
2889 	/* We process I/O in the data transfer pending queues at the highest priority, RDMA reads first. */
2890 	STAILQ_FOREACH_SAFE(rdma_req, &rqpair->pending_rdma_read_queue, state_link, req_tmp) {
2891 		if (nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
2892 			break;
2893 		}
2894 	}
2895 
2896 	/* Then RDMA writes since reads have stronger restrictions than writes */
2897 	STAILQ_FOREACH_SAFE(rdma_req, &rqpair->pending_rdma_write_queue, state_link, req_tmp) {
2898 		if (nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
2899 			break;
2900 		}
2901 	}
2902 
2903 	/* Then we handle requests waiting on memory buffers. */
2904 	STAILQ_FOREACH_SAFE(req, &rqpair->poller->group->group.pending_buf_queue, buf_link, tmp) {
2905 		rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
2906 		if (nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
2907 			break;
2908 		}
2909 	}
2910 
2911 	resources = rqpair->resources;
2912 	while (!STAILQ_EMPTY(&resources->free_queue) && !STAILQ_EMPTY(&resources->incoming_queue)) {
2913 		rdma_req = STAILQ_FIRST(&resources->free_queue);
2914 		STAILQ_REMOVE_HEAD(&resources->free_queue, state_link);
2915 		rdma_req->recv = STAILQ_FIRST(&resources->incoming_queue);
2916 		STAILQ_REMOVE_HEAD(&resources->incoming_queue, link);
2917 
2918 		if (rqpair->srq != NULL) {
2919 			rdma_req->req.qpair = &rdma_req->recv->qpair->qpair;
2920 			rdma_req->recv->qpair->qd++;
2921 		} else {
2922 			rqpair->qd++;
2923 		}
2924 
2925 		rdma_req->receive_tsc = rdma_req->recv->receive_tsc;
2926 		rdma_req->state = RDMA_REQUEST_STATE_NEW;
2927 		if (nvmf_rdma_request_process(rtransport, rdma_req) == false) {
2928 			break;
2929 		}
2930 	}
2931 	if (!STAILQ_EMPTY(&resources->incoming_queue) && STAILQ_EMPTY(&resources->free_queue)) {
2932 		rqpair->poller->stat.pending_free_request++;
2933 	}
2934 }
2935 
2936 static inline bool
2937 nvmf_rdma_can_ignore_last_wqe_reached(struct spdk_nvmf_rdma_device *device)
2938 {
2939 	/* The iWARP transport and the SoftRoCE driver don't support the LAST_WQE_REACHED ibv async event */
2940 	return nvmf_rdma_is_rxe_device(device) ||
2941 	       device->context->device->transport_type == IBV_TRANSPORT_IWARP;
2942 }
2943 
2944 static void
2945 nvmf_rdma_destroy_drained_qpair(struct spdk_nvmf_rdma_qpair *rqpair)
2946 {
2947 	struct spdk_nvmf_rdma_transport *rtransport = SPDK_CONTAINEROF(rqpair->qpair.transport,
2948 			struct spdk_nvmf_rdma_transport, transport);
2949 
2950 	nvmf_rdma_qpair_process_pending(rtransport, rqpair, true);
2951 
2952 	/* Do nothing until nvmf_rdma_close_qpair() has been called. */
2953 	if (!rqpair->to_close) {
2954 		return;
2955 	}
2956 
2957 	/* In the non-SRQ path, we will reach rqpair->max_queue_depth. In the SRQ path, we will get the last_wqe event. */
2958 	if (rqpair->current_send_depth != 0) {
2959 		return;
2960 	}
2961 
2962 	if (rqpair->srq == NULL && rqpair->current_recv_depth != rqpair->max_queue_depth) {
2963 		return;
2964 	}
2965 
2966 	if (rqpair->srq != NULL && rqpair->last_wqe_reached == false &&
2967 	    !nvmf_rdma_can_ignore_last_wqe_reached(rqpair->device)) {
2968 		return;
2969 	}
2970 
2971 	assert(rqpair->qpair.state == SPDK_NVMF_QPAIR_ERROR);
2972 
2973 	nvmf_rdma_qpair_destroy(rqpair);
2974 }
2975 
2976 static int
2977 nvmf_rdma_disconnect(struct rdma_cm_event *evt)
2978 {
2979 	struct spdk_nvmf_qpair		*qpair;
2980 	struct spdk_nvmf_rdma_qpair	*rqpair;
2981 
2982 	if (evt->id == NULL) {
2983 		SPDK_ERRLOG("disconnect request: missing cm_id\n");
2984 		return -1;
2985 	}
2986 
2987 	qpair = evt->id->context;
2988 	if (qpair == NULL) {
2989 		SPDK_ERRLOG("disconnect request: no active connection\n");
2990 		return -1;
2991 	}
2992 
2993 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
2994 
2995 	spdk_trace_record(TRACE_RDMA_QP_DISCONNECT, 0, 0, (uintptr_t)rqpair);
2996 
2997 	spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
2998 
2999 	return 0;
3000 }
3001 
3002 #ifdef DEBUG
3003 static const char *CM_EVENT_STR[] = {
3004 	"RDMA_CM_EVENT_ADDR_RESOLVED",
3005 	"RDMA_CM_EVENT_ADDR_ERROR",
3006 	"RDMA_CM_EVENT_ROUTE_RESOLVED",
3007 	"RDMA_CM_EVENT_ROUTE_ERROR",
3008 	"RDMA_CM_EVENT_CONNECT_REQUEST",
3009 	"RDMA_CM_EVENT_CONNECT_RESPONSE",
3010 	"RDMA_CM_EVENT_CONNECT_ERROR",
3011 	"RDMA_CM_EVENT_UNREACHABLE",
3012 	"RDMA_CM_EVENT_REJECTED",
3013 	"RDMA_CM_EVENT_ESTABLISHED",
3014 	"RDMA_CM_EVENT_DISCONNECTED",
3015 	"RDMA_CM_EVENT_DEVICE_REMOVAL",
3016 	"RDMA_CM_EVENT_MULTICAST_JOIN",
3017 	"RDMA_CM_EVENT_MULTICAST_ERROR",
3018 	"RDMA_CM_EVENT_ADDR_CHANGE",
3019 	"RDMA_CM_EVENT_TIMEWAIT_EXIT"
3020 };
3021 #endif /* DEBUG */
3022 
3023 static void
3024 nvmf_rdma_disconnect_qpairs_on_port(struct spdk_nvmf_rdma_transport *rtransport,
3025 				    struct spdk_nvmf_rdma_port *port)
3026 {
3027 	struct spdk_nvmf_rdma_poll_group	*rgroup;
3028 	struct spdk_nvmf_rdma_poller		*rpoller;
3029 	struct spdk_nvmf_rdma_qpair		*rqpair;
3030 
3031 	TAILQ_FOREACH(rgroup, &rtransport->poll_groups, link) {
3032 		TAILQ_FOREACH(rpoller, &rgroup->pollers, link) {
3033 			RB_FOREACH(rqpair, qpairs_tree, &rpoller->qpairs) {
3034 				if (rqpair->listen_id == port->id) {
3035 					spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
3036 				}
3037 			}
3038 		}
3039 	}
3040 }
3041 
3042 static bool
3043 nvmf_rdma_handle_cm_event_addr_change(struct spdk_nvmf_transport *transport,
3044 				      struct rdma_cm_event *event)
3045 {
3046 	const struct spdk_nvme_transport_id	*trid;
3047 	struct spdk_nvmf_rdma_port		*port;
3048 	struct spdk_nvmf_rdma_transport		*rtransport;
3049 	bool					event_acked = false;
3050 
3051 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
3052 	TAILQ_FOREACH(port, &rtransport->ports, link) {
3053 		if (port->id == event->id) {
3054 			SPDK_ERRLOG("ADDR_CHANGE: IP %s:%s migrated\n", port->trid->traddr, port->trid->trsvcid);
3055 			rdma_ack_cm_event(event);
3056 			event_acked = true;
3057 			trid = port->trid;
3058 			break;
3059 		}
3060 	}
3061 
3062 	if (event_acked) {
3063 		nvmf_rdma_disconnect_qpairs_on_port(rtransport, port);
3064 
3065 		nvmf_rdma_stop_listen(transport, trid);
3066 		nvmf_rdma_listen(transport, trid, NULL);
3067 	}
3068 
3069 	return event_acked;
3070 }
3071 
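/* Handle device removal reported on a listening cm_id: disconnect all qpairs on the port
 * and remove every listener registered for its transport ID. */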
3072 static void
3073 nvmf_rdma_handle_cm_event_port_removal(struct spdk_nvmf_transport *transport,
3074 				       struct rdma_cm_event *event)
3075 {
3076 	struct spdk_nvmf_rdma_port		*port;
3077 	struct spdk_nvmf_rdma_transport		*rtransport;
3078 
3079 	port = event->id->context;
3080 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
3081 
3082 	SPDK_NOTICELOG("Port %s:%s is being removed\n", port->trid->traddr, port->trid->trsvcid);
3083 
3084 	nvmf_rdma_disconnect_qpairs_on_port(rtransport, port);
3085 
3086 	rdma_ack_cm_event(event);
3087 
3088 	while (spdk_nvmf_transport_stop_listen(transport, port->trid) == 0) {
3089 		;
3090 	}
3091 }
3092 
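/* Drain the RDMA CM event channel and dispatch each event to its handler. Events are acked
 * here unless a handler has already acked them. */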
3093 static void
3094 nvmf_process_cm_event(struct spdk_nvmf_transport *transport)
3095 {
3096 	struct spdk_nvmf_rdma_transport *rtransport;
3097 	struct rdma_cm_event		*event;
3098 	int				rc;
3099 	bool				event_acked;
3100 
3101 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
3102 
3103 	if (rtransport->event_channel == NULL) {
3104 		return;
3105 	}
3106 
3107 	while (1) {
3108 		event_acked = false;
3109 		rc = rdma_get_cm_event(rtransport->event_channel, &event);
3110 		if (rc) {
3111 			if (errno != EAGAIN && errno != EWOULDBLOCK) {
3112 				SPDK_ERRLOG("Acceptor Event Error: %s\n", spdk_strerror(errno));
3113 			}
3114 			break;
3115 		}
3116 
3117 		SPDK_DEBUGLOG(rdma, "Acceptor Event: %s\n", CM_EVENT_STR[event->event]);
3118 
3119 		spdk_trace_record(TRACE_RDMA_CM_ASYNC_EVENT, 0, 0, 0, event->event);
3120 
3121 		switch (event->event) {
3122 		case RDMA_CM_EVENT_ADDR_RESOLVED:
3123 		case RDMA_CM_EVENT_ADDR_ERROR:
3124 		case RDMA_CM_EVENT_ROUTE_RESOLVED:
3125 		case RDMA_CM_EVENT_ROUTE_ERROR:
3126 			/* No action required. The target never attempts to resolve routes. */
3127 			break;
3128 		case RDMA_CM_EVENT_CONNECT_REQUEST:
3129 			rc = nvmf_rdma_connect(transport, event);
3130 			if (rc < 0) {
3131 				SPDK_ERRLOG("Unable to process connect event. rc: %d\n", rc);
3132 				break;
3133 			}
3134 			break;
3135 		case RDMA_CM_EVENT_CONNECT_RESPONSE:
3136 			/* The target never initiates a new connection, so this will not occur. */
3137 			break;
3138 		case RDMA_CM_EVENT_CONNECT_ERROR:
3139 			/* Can this happen? The docs say it can, but it is not clear what causes it. */
3140 			break;
3141 		case RDMA_CM_EVENT_UNREACHABLE:
3142 		case RDMA_CM_EVENT_REJECTED:
3143 			/* These only occur on the client side. */
3144 			break;
3145 		case RDMA_CM_EVENT_ESTABLISHED:
3146 			/* TODO: Should we be waiting for this event anywhere? */
3147 			break;
3148 		case RDMA_CM_EVENT_DISCONNECTED:
3149 			rc = nvmf_rdma_disconnect(event);
3150 			if (rc < 0) {
3151 				SPDK_ERRLOG("Unable to process disconnect event. rc: %d\n", rc);
3152 				break;
3153 			}
3154 			break;
3155 		case RDMA_CM_EVENT_DEVICE_REMOVAL:
3156 			/* On device removal, the kernel IB layer triggers IBV_EVENT_DEVICE_FATAL,
3157 			 * which in turn triggers RDMA_CM_EVENT_DEVICE_REMOVAL on all cma_ids.
3158 			 * Once these events are delivered to SPDK, we must release all IB resources and
3159 			 * must not attempt to call any ibv_query/modify/create functions. We can only call
3160 			 * ibv_destroy* functions to release the user space memory allocated by IB. All kernel
3161 			 * resources have already been cleaned up. */
3162 			if (event->id->qp) {
3163 				/* If the rdma_cm event has a valid `qp` pointer, the event refers to the
3164 				 * corresponding qpair. Otherwise the event refers to a listening device. */
3165 				rc = nvmf_rdma_disconnect(event);
3166 				if (rc < 0) {
3167 					SPDK_ERRLOG("Unable to process disconnect event. rc: %d\n", rc);
3168 					break;
3169 				}
3170 			} else {
3171 				nvmf_rdma_handle_cm_event_port_removal(transport, event);
3172 				event_acked = true;
3173 			}
3174 			break;
3175 		case RDMA_CM_EVENT_MULTICAST_JOIN:
3176 		case RDMA_CM_EVENT_MULTICAST_ERROR:
3177 			/* Multicast is not used */
3178 			break;
3179 		case RDMA_CM_EVENT_ADDR_CHANGE:
3180 			event_acked = nvmf_rdma_handle_cm_event_addr_change(transport, event);
3181 			break;
3182 		case RDMA_CM_EVENT_TIMEWAIT_EXIT:
3183 			/* For now, do nothing. The target never re-uses queue pairs. */
3184 			break;
3185 		default:
3186 			SPDK_ERRLOG("Unexpected Acceptor Event [%d]\n", event->event);
3187 			break;
3188 		}
3189 		if (!event_acked) {
3190 			rdma_ack_cm_event(event);
3191 		}
3192 	}
3193 }
3194 
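/* Runs on the qpair's thread once the LAST_WQE_REACHED event has been forwarded; the qpair
 * can now be destroyed if it is otherwise drained. */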
3195 static void
3196 nvmf_rdma_handle_last_wqe_reached(struct spdk_nvmf_rdma_qpair *rqpair)
3197 {
3198 	rqpair->last_wqe_reached = true;
3199 	nvmf_rdma_destroy_drained_qpair(rqpair);
3200 }
3201 
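/* Thread-message callback: if the qpair is still alive, remove the context from its event
 * list and invoke the callback, then free the context. */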
3202 static void
3203 nvmf_rdma_qpair_process_ibv_event(void *ctx)
3204 {
3205 	struct spdk_nvmf_rdma_ibv_event_ctx *event_ctx = ctx;
3206 
3207 	if (event_ctx->rqpair) {
3208 		STAILQ_REMOVE(&event_ctx->rqpair->ibv_events, event_ctx, spdk_nvmf_rdma_ibv_event_ctx, link);
3209 		if (event_ctx->cb_fn) {
3210 			event_ctx->cb_fn(event_ctx->rqpair);
3211 		}
3212 	}
3213 	free(event_ctx);
3214 }
3215 
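/* Forward an IBV async event to the qpair's owning thread (or to the thread of its destruct
 * channel) so the callback runs in the correct context. */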
3216 static int
3217 nvmf_rdma_send_qpair_async_event(struct spdk_nvmf_rdma_qpair *rqpair,
3218 				 spdk_nvmf_rdma_qpair_ibv_event fn)
3219 {
3220 	struct spdk_nvmf_rdma_ibv_event_ctx *ctx;
3221 	struct spdk_thread *thr = NULL;
3222 	int rc;
3223 
3224 	if (rqpair->qpair.group) {
3225 		thr = rqpair->qpair.group->thread;
3226 	} else if (rqpair->destruct_channel) {
3227 		thr = spdk_io_channel_get_thread(rqpair->destruct_channel);
3228 	}
3229 
3230 	if (!thr) {
3231 		SPDK_DEBUGLOG(rdma, "rqpair %p has no thread\n", rqpair);
3232 		return -EINVAL;
3233 	}
3234 
3235 	ctx = calloc(1, sizeof(*ctx));
3236 	if (!ctx) {
3237 		return -ENOMEM;
3238 	}
3239 
3240 	ctx->rqpair = rqpair;
3241 	ctx->cb_fn = fn;
3242 	STAILQ_INSERT_TAIL(&rqpair->ibv_events, ctx, link);
3243 
3244 	rc = spdk_thread_send_msg(thr, nvmf_rdma_qpair_process_ibv_event, ctx);
3245 	if (rc) {
3246 		STAILQ_REMOVE(&rqpair->ibv_events, ctx, spdk_nvmf_rdma_ibv_event_ctx, link);
3247 		free(ctx);
3248 	}
3249 
3250 	return rc;
3251 }
3252 
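/* Read and handle a single IB async event from the device's asynchronous event queue. */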
3253 static int
3254 nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
3255 {
3256 	int				rc;
3257 	struct spdk_nvmf_rdma_qpair	*rqpair = NULL;
3258 	struct ibv_async_event		event;
3259 
3260 	rc = ibv_get_async_event(device->context, &event);
3261 
3262 	if (rc) {
3263 		/* In non-blocking mode, -1 means there are no events available. */
3264 		return rc;
3265 	}
3266 
3267 	switch (event.event_type) {
3268 	case IBV_EVENT_QP_FATAL:
3269 	case IBV_EVENT_QP_LAST_WQE_REACHED:
3270 	case IBV_EVENT_SQ_DRAINED:
3271 	case IBV_EVENT_QP_REQ_ERR:
3272 	case IBV_EVENT_QP_ACCESS_ERR:
3273 	case IBV_EVENT_COMM_EST:
3274 	case IBV_EVENT_PATH_MIG:
3275 	case IBV_EVENT_PATH_MIG_ERR:
3276 		rqpair = event.element.qp->qp_context;
3277 		if (!rqpair) {
3278 			/* QP events for NVMe-RDMA initiator QPs on the same device may also be returned here. */
3279 			SPDK_NOTICELOG("Async QP event for unknown QP: %s\n",
3280 				       ibv_event_type_str(event.event_type));
3281 			break;
3282 		}
3283 
3284 		switch (event.event_type) {
3285 		case IBV_EVENT_QP_FATAL:
3286 			SPDK_ERRLOG("Fatal event received for rqpair %p\n", rqpair);
3287 			spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
3288 					  (uintptr_t)rqpair, event.event_type);
3289 			nvmf_rdma_update_ibv_state(rqpair);
3290 			spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
3291 			break;
3292 		case IBV_EVENT_QP_LAST_WQE_REACHED:
3293 			/* This event only occurs for shared receive queues. */
3294 			SPDK_DEBUGLOG(rdma, "Last WQE reached event received for rqpair %p\n", rqpair);
3295 			rc = nvmf_rdma_send_qpair_async_event(rqpair, nvmf_rdma_handle_last_wqe_reached);
3296 			if (rc) {
3297 				SPDK_WARNLOG("Failed to send LAST_WQE_REACHED event. rqpair %p, err %d\n", rqpair, rc);
3298 				rqpair->last_wqe_reached = true;
3299 			}
3300 			break;
3301 		case IBV_EVENT_SQ_DRAINED:
3302 			/* This event occurs frequently in both error and non-error states.
3303 			 * Check whether the qpair is in an error state before initiating a disconnect. */
3304 			SPDK_DEBUGLOG(rdma, "Last sq drained event received for rqpair %p\n", rqpair);
3305 			spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
3306 					  (uintptr_t)rqpair, event.event_type);
3307 			if (nvmf_rdma_update_ibv_state(rqpair) == IBV_QPS_ERR) {
3308 				spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
3309 			}
3310 			break;
3311 		case IBV_EVENT_QP_REQ_ERR:
3312 		case IBV_EVENT_QP_ACCESS_ERR:
3313 		case IBV_EVENT_COMM_EST:
3314 		case IBV_EVENT_PATH_MIG:
3315 		case IBV_EVENT_PATH_MIG_ERR:
3316 			SPDK_NOTICELOG("Async QP event: %s\n",
3317 				       ibv_event_type_str(event.event_type));
3318 			spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
3319 					  (uintptr_t)rqpair, event.event_type);
3320 			nvmf_rdma_update_ibv_state(rqpair);
3321 			break;
3322 		default:
3323 			break;
3324 		}
3325 		break;
3326 	case IBV_EVENT_CQ_ERR:
3327 	case IBV_EVENT_DEVICE_FATAL:
3328 	case IBV_EVENT_PORT_ACTIVE:
3329 	case IBV_EVENT_PORT_ERR:
3330 	case IBV_EVENT_LID_CHANGE:
3331 	case IBV_EVENT_PKEY_CHANGE:
3332 	case IBV_EVENT_SM_CHANGE:
3333 	case IBV_EVENT_SRQ_ERR:
3334 	case IBV_EVENT_SRQ_LIMIT_REACHED:
3335 	case IBV_EVENT_CLIENT_REREGISTER:
3336 	case IBV_EVENT_GID_CHANGE:
3337 	default:
3338 		SPDK_NOTICELOG("Async event: %s\n",
3339 			       ibv_event_type_str(event.event_type));
3340 		spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0, 0, event.event_type);
3341 		break;
3342 	}
3343 	ibv_ack_async_event(&event);
3344 
3345 	return 0;
3346 }
3347 
3348 static void
3349 nvmf_process_ib_events(struct spdk_nvmf_rdma_device *device, uint32_t max_events)
3350 {
3351 	int rc = 0;
3352 	uint32_t i = 0;
3353 
3354 	for (i = 0; i < max_events; i++) {
3355 		rc = nvmf_process_ib_event(device);
3356 		if (rc) {
3357 			break;
3358 		}
3359 	}
3360 
3361 	SPDK_DEBUGLOG(rdma, "Device %s: %u events processed\n", device->context->device->name, i);
3362 }
3363 
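/* Acceptor poller: poll the CM event channel fd and each device's async event fd, servicing
 * whichever descriptors are ready. */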
3364 static int
3365 nvmf_rdma_accept(void *ctx)
3366 {
3367 	int	nfds, i = 0;
3368 	struct spdk_nvmf_transport *transport = ctx;
3369 	struct spdk_nvmf_rdma_transport *rtransport;
3370 	struct spdk_nvmf_rdma_device *device, *tmp;
3371 	uint32_t count;
3372 
3373 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
3374 	count = nfds = poll(rtransport->poll_fds, rtransport->npoll_fds, 0);
3375 
3376 	if (nfds <= 0) {
3377 		return SPDK_POLLER_IDLE;
3378 	}
3379 
3380 	/* The first poll descriptor is RDMA CM event */
3381 	if (rtransport->poll_fds[i++].revents & POLLIN) {
3382 		nvmf_process_cm_event(transport);
3383 		nfds--;
3384 	}
3385 
3386 	if (nfds == 0) {
3387 		return SPDK_POLLER_BUSY;
3388 	}
3389 
3390 	/* Second and subsequent poll descriptors are IB async events */
3391 	TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) {
3392 		if (rtransport->poll_fds[i++].revents & POLLIN) {
3393 			nvmf_process_ib_events(device, 32);
3394 			nfds--;
3395 		}
3396 	}
3397 	/* Check that all flagged fds have been served. */
3398 	assert(nfds == 0);
3399 
3400 	return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
3401 }
3402 
3403 static void
3404 nvmf_rdma_cdata_init(struct spdk_nvmf_transport *transport, struct spdk_nvmf_subsystem *subsystem,
3405 		     struct spdk_nvmf_ctrlr_data *cdata)
3406 {
3407 	cdata->nvmf_specific.msdbd = SPDK_NVMF_MAX_SGL_ENTRIES;
3408 
3409 	/* Disable in-capsule data transfer for the RDMA controller when dif_insert_or_strip is enabled,
3410 	 * since in-capsule data only works with NVMe drives that support the SGL memory layout. */
3411 	if (transport->opts.dif_insert_or_strip) {
3412 		cdata->nvmf_specific.ioccsz = sizeof(struct spdk_nvme_cmd) / 16;
3413 	}
3414 
3415 	if (cdata->nvmf_specific.ioccsz > ((sizeof(struct spdk_nvme_cmd) + 0x1000) / 16)) {
3416 		SPDK_WARNLOG("RDMA is configured to support up to 16 SGL entries while in capsule"
3417 			     " data is greater than 4KiB.\n");
3418 		SPDK_WARNLOG("When used in conjunction with the NVMe-oF initiator from the Linux "
3419 			     "kernel between versions 5.4 and 5.12 data corruption may occur for "
3420 			     "writes that are not a multiple of 4KiB in size.\n");
3421 	}
3422 }
3423 
3424 static void
3425 nvmf_rdma_discover(struct spdk_nvmf_transport *transport,
3426 		   struct spdk_nvme_transport_id *trid,
3427 		   struct spdk_nvmf_discovery_log_page_entry *entry)
3428 {
3429 	entry->trtype = SPDK_NVMF_TRTYPE_RDMA;
3430 	entry->adrfam = trid->adrfam;
3431 	entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_REQUIRED;
3432 
3433 	spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' ');
3434 	spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' ');
3435 
3436 	entry->tsas.rdma.rdma_qptype = SPDK_NVMF_RDMA_QPTYPE_RELIABLE_CONNECTED;
3437 	entry->tsas.rdma.rdma_prtype = SPDK_NVMF_RDMA_PRTYPE_NONE;
3438 	entry->tsas.rdma.rdma_cms = SPDK_NVMF_RDMA_CMS_RDMA_CM;
3439 }
3440 
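/* Create a poller for the given device within a poll group: optionally back it with a shared
 * receive queue and its resources, then allocate its completion queue. */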
3441 static int
3442 nvmf_rdma_poller_create(struct spdk_nvmf_rdma_transport *rtransport,
3443 			struct spdk_nvmf_rdma_poll_group *rgroup, struct spdk_nvmf_rdma_device *device,
3444 			struct spdk_nvmf_rdma_poller **out_poller)
3445 {
3446 	struct spdk_nvmf_rdma_poller		*poller;
3447 	struct spdk_rdma_srq_init_attr		srq_init_attr;
3448 	struct spdk_nvmf_rdma_resource_opts	opts;
3449 	int					num_cqe;
3450 
3451 	poller = calloc(1, sizeof(*poller));
3452 	if (!poller) {
3453 		SPDK_ERRLOG("Unable to allocate memory for new RDMA poller\n");
3454 		return -1;
3455 	}
3456 
3457 	poller->device = device;
3458 	poller->group = rgroup;
3459 	*out_poller = poller;
3460 
3461 	RB_INIT(&poller->qpairs);
3462 	STAILQ_INIT(&poller->qpairs_pending_send);
3463 	STAILQ_INIT(&poller->qpairs_pending_recv);
3464 
3465 	TAILQ_INSERT_TAIL(&rgroup->pollers, poller, link);
3466 	SPDK_DEBUGLOG(rdma, "Create poller %p on device %p in poll group %p.\n", poller, device, rgroup);
3467 	if (rtransport->rdma_opts.no_srq == false && device->num_srq < device->attr.max_srq) {
3468 		if ((int)rtransport->rdma_opts.max_srq_depth > device->attr.max_srq_wr) {
3469 			SPDK_WARNLOG("Requested SRQ depth %u, max supported by dev %s is %d\n",
3470 				     rtransport->rdma_opts.max_srq_depth, device->context->device->name, device->attr.max_srq_wr);
3471 		}
3472 		poller->max_srq_depth = spdk_min((int)rtransport->rdma_opts.max_srq_depth, device->attr.max_srq_wr);
3473 
3474 		device->num_srq++;
3475 		memset(&srq_init_attr, 0, sizeof(srq_init_attr));
3476 		srq_init_attr.pd = device->pd;
3477 		srq_init_attr.stats = &poller->stat.qp_stats.recv;
3478 		srq_init_attr.srq_init_attr.attr.max_wr = poller->max_srq_depth;
3479 		srq_init_attr.srq_init_attr.attr.max_sge = spdk_min(device->attr.max_sge, NVMF_DEFAULT_RX_SGE);
3480 		poller->srq = spdk_rdma_srq_create(&srq_init_attr);
3481 		if (!poller->srq) {
3482 			SPDK_ERRLOG("Unable to create shared receive queue, errno %d\n", errno);
3483 			return -1;
3484 		}
3485 
3486 		opts.qp = poller->srq;
3487 		opts.map = device->map;
3488 		opts.qpair = NULL;
3489 		opts.shared = true;
3490 		opts.max_queue_depth = poller->max_srq_depth;
3491 		opts.in_capsule_data_size = rtransport->transport.opts.in_capsule_data_size;
3492 
3493 		poller->resources = nvmf_rdma_resources_create(&opts);
3494 		if (!poller->resources) {
3495 			SPDK_ERRLOG("Unable to allocate resources for shared receive queue.\n");
3496 			return -1;
3497 		}
3498 	}
3499 
3500 	/*
3501 	 * When using an SRQ, we can limit the completion queue size at startup.
3502 	 * The following formula represents the calculation:
3503 	 * num_cqe = num_recv + num_data_wr + num_send_wr,
3504 	 * where num_recv = num_data_wr = num_send_wr = poller->max_srq_depth.
3505 	 */
3506 	if (poller->srq) {
3507 		num_cqe = poller->max_srq_depth * 3;
3508 	} else {
3509 		num_cqe = rtransport->rdma_opts.num_cqe;
3510 	}
3511 
3512 	poller->cq = ibv_create_cq(device->context, num_cqe, poller, NULL, 0);
3513 	if (!poller->cq) {
3514 		SPDK_ERRLOG("Unable to create completion queue\n");
3515 		return -1;
3516 	}
3517 	poller->num_cqe = num_cqe;
3518 	return 0;
3519 }
3520 
3521 static void nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group);
3522 
3523 static struct spdk_nvmf_transport_poll_group *
3524 nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport,
3525 			    struct spdk_nvmf_poll_group *group)
3526 {
3527 	struct spdk_nvmf_rdma_transport		*rtransport;
3528 	struct spdk_nvmf_rdma_poll_group	*rgroup;
3529 	struct spdk_nvmf_rdma_poller		*poller;
3530 	struct spdk_nvmf_rdma_device		*device;
3531 	int					rc;
3532 
3533 	rtransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_rdma_transport, transport);
3534 
3535 	rgroup = calloc(1, sizeof(*rgroup));
3536 	if (!rgroup) {
3537 		return NULL;
3538 	}
3539 
3540 	TAILQ_INIT(&rgroup->pollers);
3541 
3542 	TAILQ_FOREACH(device, &rtransport->devices, link) {
3543 		rc = nvmf_rdma_poller_create(rtransport, rgroup, device, &poller);
3544 		if (rc < 0) {
3545 			nvmf_rdma_poll_group_destroy(&rgroup->group);
3546 			return NULL;
3547 		}
3548 	}
3549 
3550 	TAILQ_INSERT_TAIL(&rtransport->poll_groups, rgroup, link);
3551 	if (rtransport->conn_sched.next_admin_pg == NULL) {
3552 		rtransport->conn_sched.next_admin_pg = rgroup;
3553 		rtransport->conn_sched.next_io_pg = rgroup;
3554 	}
3555 
3556 	return &rgroup->group;
3557 }
3558 
3559 static uint32_t
3560 nvmf_poll_group_get_io_qpair_count(struct spdk_nvmf_poll_group *pg)
3561 {
3562 	uint32_t count;
3563 
3564 	/* Just assume that unassociated qpairs will eventually be io
3565 	 * qpairs.  This is close enough for the use cases for this
3566 	 * function.
3567 	 */
3568 	pthread_mutex_lock(&pg->mutex);
3569 	count = pg->stat.current_io_qpairs + pg->current_unassociated_qpairs;
3570 	pthread_mutex_unlock(&pg->mutex);
3571 
3572 	return count;
3573 }
3574 
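/* Choose a poll group for a new qpair. Admin qpairs are assigned round-robin; I/O qpairs are
 * steered toward the poll group currently serving the fewest I/O qpairs. */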
3575 static struct spdk_nvmf_transport_poll_group *
3576 nvmf_rdma_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair)
3577 {
3578 	struct spdk_nvmf_rdma_transport *rtransport;
3579 	struct spdk_nvmf_rdma_poll_group **pg;
3580 	struct spdk_nvmf_transport_poll_group *result;
3581 	uint32_t count;
3582 
3583 	rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport);
3584 
3585 	if (TAILQ_EMPTY(&rtransport->poll_groups)) {
3586 		return NULL;
3587 	}
3588 
3589 	if (qpair->qid == 0) {
3590 		pg = &rtransport->conn_sched.next_admin_pg;
3591 	} else {
3592 		struct spdk_nvmf_rdma_poll_group *pg_min, *pg_start, *pg_current;
3593 		uint32_t min_value;
3594 
3595 		pg = &rtransport->conn_sched.next_io_pg;
3596 		pg_min = *pg;
3597 		pg_start = *pg;
3598 		pg_current = *pg;
3599 		min_value = nvmf_poll_group_get_io_qpair_count(pg_current->group.group);
3600 
3601 		while ((count = nvmf_poll_group_get_io_qpair_count(pg_current->group.group)) > 0) {
3602 			pg_current = TAILQ_NEXT(pg_current, link);
3603 			if (pg_current == NULL) {
3604 				pg_current = TAILQ_FIRST(&rtransport->poll_groups);
3605 			}
3606 
3607 			if (count < min_value) {
3608 				min_value = count;
3609 				pg_min = pg_current;
3610 			}
3611 
3612 			if (pg_current == pg_start) {
3613 				break;
3614 			}
3615 		}
3616 		*pg = pg_min;
3617 	}
3618 
3619 	assert(*pg != NULL);
3620 
3621 	result = &(*pg)->group;
3622 
3623 	*pg = TAILQ_NEXT(*pg, link);
3624 	if (*pg == NULL) {
3625 		*pg = TAILQ_FIRST(&rtransport->poll_groups);
3626 	}
3627 
3628 	return result;
3629 }
3630 
3631 static void
3632 nvmf_rdma_poller_destroy(struct spdk_nvmf_rdma_poller *poller)
3633 {
3634 	struct spdk_nvmf_rdma_qpair	*qpair, *tmp_qpair;
3635 	RB_FOREACH_SAFE(qpair, qpairs_tree, &poller->qpairs, tmp_qpair) {
3636 		nvmf_rdma_qpair_destroy(qpair);
3637 	}
3638 
3639 	if (poller->srq) {
3640 		if (poller->resources) {
3641 			nvmf_rdma_resources_destroy(poller->resources);
3642 		}
3643 		spdk_rdma_srq_destroy(poller->srq);
3644 		SPDK_DEBUGLOG(rdma, "Destroyed RDMA shared queue %p\n", poller->srq);
3645 	}
3646 
3647 	if (poller->cq) {
3648 		ibv_destroy_cq(poller->cq);
3649 	}
3650 
3651 	free(poller);
3652 }
3653 
3654 static void
3655 nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
3656 {
3657 	struct spdk_nvmf_rdma_poll_group	*rgroup, *next_rgroup;
3658 	struct spdk_nvmf_rdma_poller		*poller, *tmp;
3659 	struct spdk_nvmf_rdma_transport		*rtransport;
3660 
3661 	rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);
3662 	if (!rgroup) {
3663 		return;
3664 	}
3665 
3666 	TAILQ_FOREACH_SAFE(poller, &rgroup->pollers, link, tmp) {
3667 		TAILQ_REMOVE(&rgroup->pollers, poller, link);
3668 		nvmf_rdma_poller_destroy(poller);
3669 	}
3670 
3671 	if (rgroup->group.transport == NULL) {
3672 		/* Transport can be NULL when nvmf_rdma_poll_group_create()
3673 		 * calls this function directly in a failure path. */
3674 		free(rgroup);
3675 		return;
3676 	}
3677 
3678 	rtransport = SPDK_CONTAINEROF(rgroup->group.transport, struct spdk_nvmf_rdma_transport, transport);
3679 
3680 	next_rgroup = TAILQ_NEXT(rgroup, link);
3681 	TAILQ_REMOVE(&rtransport->poll_groups, rgroup, link);
3682 	if (next_rgroup == NULL) {
3683 		next_rgroup = TAILQ_FIRST(&rtransport->poll_groups);
3684 	}
3685 	if (rtransport->conn_sched.next_admin_pg == rgroup) {
3686 		rtransport->conn_sched.next_admin_pg = next_rgroup;
3687 	}
3688 	if (rtransport->conn_sched.next_io_pg == rgroup) {
3689 		rtransport->conn_sched.next_io_pg = next_rgroup;
3690 	}
3691 
3692 	free(rgroup);
3693 }
3694 
3695 static void
3696 nvmf_rdma_qpair_reject_connection(struct spdk_nvmf_rdma_qpair *rqpair)
3697 {
3698 	if (rqpair->cm_id != NULL) {
3699 		nvmf_rdma_event_reject(rqpair->cm_id, SPDK_NVMF_RDMA_ERROR_NO_RESOURCES);
3700 	}
3701 }
3702 
3703 static int
3704 nvmf_rdma_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
3705 			 struct spdk_nvmf_qpair *qpair)
3706 {
3707 	struct spdk_nvmf_rdma_poll_group	*rgroup;
3708 	struct spdk_nvmf_rdma_qpair		*rqpair;
3709 	struct spdk_nvmf_rdma_device		*device;
3710 	struct spdk_nvmf_rdma_poller		*poller;
3711 	int					rc;
3712 
3713 	rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);
3714 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
3715 
3716 	device = rqpair->device;
3717 
3718 	TAILQ_FOREACH(poller, &rgroup->pollers, link) {
3719 		if (poller->device == device) {
3720 			break;
3721 		}
3722 	}
3723 
3724 	if (!poller) {
3725 		SPDK_ERRLOG("No poller found for device.\n");
3726 		return -1;
3727 	}
3728 
3729 	rqpair->poller = poller;
3730 	rqpair->srq = rqpair->poller->srq;
3731 
3732 	rc = nvmf_rdma_qpair_initialize(qpair);
3733 	if (rc < 0) {
3734 		SPDK_ERRLOG("Failed to initialize nvmf_rdma_qpair with qpair=%p\n", qpair);
3735 		rqpair->poller = NULL;
3736 		rqpair->srq = NULL;
3737 		return -1;
3738 	}
3739 
3740 	RB_INSERT(qpairs_tree, &poller->qpairs, rqpair);
3741 
3742 	rc = nvmf_rdma_event_accept(rqpair->cm_id, rqpair);
3743 	if (rc) {
3744 		/* Try to reject the connection, though at this point we probably can't. */
3745 		nvmf_rdma_qpair_reject_connection(rqpair);
3746 		return -1;
3747 	}
3748 
3749 	nvmf_rdma_update_ibv_state(rqpair);
3750 
3751 	return 0;
3752 }
3753 
3754 static int
3755 nvmf_rdma_poll_group_remove(struct spdk_nvmf_transport_poll_group *group,
3756 			    struct spdk_nvmf_qpair *qpair)
3757 {
3758 	struct spdk_nvmf_rdma_qpair		*rqpair;
3759 
3760 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
3761 	assert(group->transport->tgt != NULL);
3762 
3763 	rqpair->destruct_channel = spdk_get_io_channel(group->transport->tgt);
3764 
3765 	if (!rqpair->destruct_channel) {
3766 		SPDK_WARNLOG("failed to get io_channel, qpair %p\n", qpair);
3767 		return 0;
3768 	}
3769 
3770 	/* Sanity check that we get io_channel on the correct thread */
3771 	if (qpair->group) {
3772 		assert(qpair->group->thread == spdk_io_channel_get_thread(rqpair->destruct_channel));
3773 	}
3774 
3775 	return 0;
3776 }
3777 
3778 static int
3779 nvmf_rdma_request_free(struct spdk_nvmf_request *req)
3780 {
3781 	struct spdk_nvmf_rdma_request	*rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
3782 	struct spdk_nvmf_rdma_transport	*rtransport = SPDK_CONTAINEROF(req->qpair->transport,
3783 			struct spdk_nvmf_rdma_transport, transport);
3784 	struct spdk_nvmf_rdma_qpair *rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair,
3785 					      struct spdk_nvmf_rdma_qpair, qpair);
3786 
3787 	/*
3788 	 * AER requests are freed when a qpair is destroyed. The recv corresponding to that request
3789 	 * needs to be returned to the shared receive queue or the poll group will eventually be
3790 	 * starved of RECV structures.
3791 	 */
3792 	if (rqpair->srq && rdma_req->recv) {
3793 		int rc;
3794 		struct ibv_recv_wr *bad_recv_wr;
3795 
3796 		spdk_rdma_srq_queue_recv_wrs(rqpair->srq, &rdma_req->recv->wr);
3797 		rc = spdk_rdma_srq_flush_recv_wrs(rqpair->srq, &bad_recv_wr);
3798 		if (rc) {
3799 			SPDK_ERRLOG("Unable to re-post rx descriptor\n");
3800 		}
3801 	}
3802 
3803 	_nvmf_rdma_request_free(rdma_req, rtransport);
3804 	return 0;
3805 }
3806 
3807 static int
3808 nvmf_rdma_request_complete(struct spdk_nvmf_request *req)
3809 {
3810 	struct spdk_nvmf_rdma_transport	*rtransport = SPDK_CONTAINEROF(req->qpair->transport,
3811 			struct spdk_nvmf_rdma_transport, transport);
3812 	struct spdk_nvmf_rdma_request	*rdma_req = SPDK_CONTAINEROF(req,
3813 			struct spdk_nvmf_rdma_request, req);
3814 	struct spdk_nvmf_rdma_qpair     *rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair,
3815 			struct spdk_nvmf_rdma_qpair, qpair);
3816 
3817 	if (rqpair->ibv_state != IBV_QPS_ERR) {
3818 		/* The connection is alive, so process the request as normal */
3819 		rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
3820 	} else {
3821 		/* The connection is dead. Move the request directly to the completed state. */
3822 		rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
3823 	}
3824 
3825 	nvmf_rdma_request_process(rtransport, rdma_req);
3826 
3827 	return 0;
3828 }
3829 
3830 static void
3831 nvmf_rdma_close_qpair(struct spdk_nvmf_qpair *qpair,
3832 		      spdk_nvmf_transport_qpair_fini_cb cb_fn, void *cb_arg)
3833 {
3834 	struct spdk_nvmf_rdma_qpair *rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
3835 
3836 	rqpair->to_close = true;
3837 
3838 	/* This happens only when the qpair is disconnected before
3839 	 * it is added to the poll group. Since there is no poll group,
3840 	 * the RDMA qp has not been initialized yet and the RDMA CM
3841 	 * event has not yet been acknowledged, so we need to reject it.
3842 	 */
3843 	if (rqpair->qpair.state == SPDK_NVMF_QPAIR_UNINITIALIZED) {
3844 		nvmf_rdma_qpair_reject_connection(rqpair);
3845 		nvmf_rdma_qpair_destroy(rqpair);
3846 		return;
3847 	}
3848 
3849 	if (rqpair->rdma_qp) {
3850 		spdk_rdma_qp_disconnect(rqpair->rdma_qp);
3851 	}
3852 
3853 	nvmf_rdma_destroy_drained_qpair(rqpair);
3854 
3855 	if (cb_fn) {
3856 		cb_fn(cb_arg);
3857 	}
3858 }
3859 
3860 static struct spdk_nvmf_rdma_qpair *
3861 get_rdma_qpair_from_wc(struct spdk_nvmf_rdma_poller *rpoller, struct ibv_wc *wc)
3862 {
3863 	struct spdk_nvmf_rdma_qpair find;
3864 
3865 	find.qp_num = wc->qp_num;
3866 
3867 	return RB_FIND(qpairs_tree, &rpoller->qpairs, &find);
3868 }
3869 
3870 #ifdef DEBUG
3871 static int
3872 nvmf_rdma_req_is_completing(struct spdk_nvmf_rdma_request *rdma_req)
3873 {
3874 	return rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST ||
3875 	       rdma_req->state == RDMA_REQUEST_STATE_COMPLETING;
3876 }
3877 #endif
3878 
3879 static void
3880 _poller_reset_failed_recvs(struct spdk_nvmf_rdma_poller *rpoller, struct ibv_recv_wr *bad_recv_wr,
3881 			   int rc)
3882 {
3883 	struct spdk_nvmf_rdma_recv	*rdma_recv;
3884 	struct spdk_nvmf_rdma_wr	*bad_rdma_wr;
3885 
3886 	SPDK_ERRLOG("Failed to post a recv for the poller %p with errno %d\n", rpoller, -rc);
3887 	while (bad_recv_wr != NULL) {
3888 		bad_rdma_wr = (struct spdk_nvmf_rdma_wr *)bad_recv_wr->wr_id;
3889 		rdma_recv = SPDK_CONTAINEROF(bad_rdma_wr, struct spdk_nvmf_rdma_recv, rdma_wr);
3890 
3891 		rdma_recv->qpair->current_recv_depth++;
3892 		bad_recv_wr = bad_recv_wr->next;
3893 		SPDK_ERRLOG("Failed to post a recv for the qpair %p with errno %d\n", rdma_recv->qpair, -rc);
3894 		spdk_nvmf_qpair_disconnect(&rdma_recv->qpair->qpair, NULL, NULL);
3895 	}
3896 }
3897 
3898 static void
3899 _qp_reset_failed_recvs(struct spdk_nvmf_rdma_qpair *rqpair, struct ibv_recv_wr *bad_recv_wr, int rc)
3900 {
3901 	SPDK_ERRLOG("Failed to post a recv for the qpair %p with errno %d\n", rqpair, -rc);
3902 	while (bad_recv_wr != NULL) {
3903 		bad_recv_wr = bad_recv_wr->next;
3904 		rqpair->current_recv_depth++;
3905 	}
3906 	spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
3907 }
3908 
3909 static void
3910 _poller_submit_recvs(struct spdk_nvmf_rdma_transport *rtransport,
3911 		     struct spdk_nvmf_rdma_poller *rpoller)
3912 {
3913 	struct spdk_nvmf_rdma_qpair	*rqpair;
3914 	struct ibv_recv_wr		*bad_recv_wr;
3915 	int				rc;
3916 
3917 	if (rpoller->srq) {
3918 		rc = spdk_rdma_srq_flush_recv_wrs(rpoller->srq, &bad_recv_wr);
3919 		if (rc) {
3920 			_poller_reset_failed_recvs(rpoller, bad_recv_wr, rc);
3921 		}
3922 	} else {
3923 		while (!STAILQ_EMPTY(&rpoller->qpairs_pending_recv)) {
3924 			rqpair = STAILQ_FIRST(&rpoller->qpairs_pending_recv);
3925 			rc = spdk_rdma_qp_flush_recv_wrs(rqpair->rdma_qp, &bad_recv_wr);
3926 			if (rc) {
3927 				_qp_reset_failed_recvs(rqpair, bad_recv_wr, rc);
3928 			}
3929 			STAILQ_REMOVE_HEAD(&rpoller->qpairs_pending_recv, recv_link);
3930 		}
3931 	}
3932 }
3933 
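/* Roll back send depth accounting for every send WR that failed to post and move the affected
 * requests to a terminal state, then disconnect the qpair if it is still active. */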
3934 static void
3935 _qp_reset_failed_sends(struct spdk_nvmf_rdma_transport *rtransport,
3936 		       struct spdk_nvmf_rdma_qpair *rqpair, struct ibv_send_wr *bad_wr, int rc)
3937 {
3938 	struct spdk_nvmf_rdma_wr	*bad_rdma_wr;
3939 	struct spdk_nvmf_rdma_request	*prev_rdma_req = NULL, *cur_rdma_req = NULL;
3940 
3941 	SPDK_ERRLOG("Failed to post a send for the qpair %p with errno %d\n", rqpair, -rc);
3942 	for (; bad_wr != NULL; bad_wr = bad_wr->next) {
3943 		bad_rdma_wr = (struct spdk_nvmf_rdma_wr *)bad_wr->wr_id;
3944 		assert(rqpair->current_send_depth > 0);
3945 		rqpair->current_send_depth--;
3946 		switch (bad_rdma_wr->type) {
3947 		case RDMA_WR_TYPE_DATA:
3948 			cur_rdma_req = SPDK_CONTAINEROF(bad_rdma_wr, struct spdk_nvmf_rdma_request, data.rdma_wr);
3949 			if (bad_wr->opcode == IBV_WR_RDMA_READ) {
3950 				assert(rqpair->current_read_depth > 0);
3951 				rqpair->current_read_depth--;
3952 			}
3953 			break;
3954 		case RDMA_WR_TYPE_SEND:
3955 			cur_rdma_req = SPDK_CONTAINEROF(bad_rdma_wr, struct spdk_nvmf_rdma_request, rsp.rdma_wr);
3956 			break;
3957 		default:
3958 			SPDK_ERRLOG("Found a RECV in the list of pending SEND requests for qpair %p\n", rqpair);
3959 			prev_rdma_req = cur_rdma_req;
3960 			continue;
3961 		}
3962 
3963 		if (prev_rdma_req == cur_rdma_req) {
3964 			/* This request was already handled by an earlier WR, i.e. we were performing an NVMe read.
3965 			 * We only have to check against the previous request since each request's WRs are contiguous in this list. */
3966 			continue;
3967 		}
3968 
3969 		switch (cur_rdma_req->state) {
3970 		case RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER:
3971 			cur_rdma_req->req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
3972 			cur_rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
3973 			break;
3974 		case RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST:
3975 		case RDMA_REQUEST_STATE_COMPLETING:
3976 			cur_rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
3977 			break;
3978 		default:
3979 			SPDK_ERRLOG("Found a request in a bad state %d when draining pending SEND requests for qpair %p\n",
3980 				    cur_rdma_req->state, rqpair);
3981 			continue;
3982 		}
3983 
3984 		nvmf_rdma_request_process(rtransport, cur_rdma_req);
3985 		prev_rdma_req = cur_rdma_req;
3986 	}
3987 
3988 	if (rqpair->qpair.state == SPDK_NVMF_QPAIR_ACTIVE) {
3989 		/* Disconnect the connection. */
3990 		spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
3991 	}
3992 
3993 }
3994 
3995 static void
3996 _poller_submit_sends(struct spdk_nvmf_rdma_transport *rtransport,
3997 		     struct spdk_nvmf_rdma_poller *rpoller)
3998 {
3999 	struct spdk_nvmf_rdma_qpair	*rqpair;
4000 	struct ibv_send_wr		*bad_wr = NULL;
4001 	int				rc;
4002 
4003 	while (!STAILQ_EMPTY(&rpoller->qpairs_pending_send)) {
4004 		rqpair = STAILQ_FIRST(&rpoller->qpairs_pending_send);
4005 		rc = spdk_rdma_qp_flush_send_wrs(rqpair->rdma_qp, &bad_wr);
4006 
4007 		/* bad_wr always points to the first WR that failed. */
4008 		if (rc) {
4009 			_qp_reset_failed_sends(rtransport, rqpair, bad_wr, rc);
4010 		}
4011 		STAILQ_REMOVE_HEAD(&rpoller->qpairs_pending_send, send_link);
4012 	}
4013 }
4014 
4015 static const char *
4016 nvmf_rdma_wr_type_str(enum spdk_nvmf_rdma_wr_type wr_type)
4017 {
4018 	switch (wr_type) {
4019 	case RDMA_WR_TYPE_RECV:
4020 		return "RECV";
4021 	case RDMA_WR_TYPE_SEND:
4022 		return "SEND";
4023 	case RDMA_WR_TYPE_DATA:
4024 		return "DATA";
4025 	default:
4026 		SPDK_ERRLOG("Unknown WR type %d\n", wr_type);
4027 		SPDK_UNREACHABLE();
4028 	}
4029 }
4030 
4031 static inline void
4032 nvmf_rdma_log_wc_status(struct spdk_nvmf_rdma_qpair *rqpair, struct ibv_wc *wc)
4033 {
4034 	enum spdk_nvmf_rdma_wr_type wr_type = ((struct spdk_nvmf_rdma_wr *)wc->wr_id)->type;
4035 
4036 	if (wc->status == IBV_WC_WR_FLUSH_ERR) {
4037 		/* If qpair is in ERR state, we will receive completions for all posted and not completed
4038 		 * Work Requests with IBV_WC_WR_FLUSH_ERR status. Don't log an error in that case */
4039 		SPDK_DEBUGLOG(rdma,
4040 			      "Error on CQ %p, (qp state %d ibv_state %d) request 0x%lu, type %s, status: (%d): %s\n",
4041 			      rqpair->poller->cq, rqpair->qpair.state, rqpair->ibv_state, wc->wr_id,
4042 			      nvmf_rdma_wr_type_str(wr_type), wc->status, ibv_wc_status_str(wc->status));
4043 	} else {
4044 		SPDK_ERRLOG("Error on CQ %p, (qp state %d ibv_state %d) request 0x%lu, type %s, status: (%d): %s\n",
4045 			    rqpair->poller->cq, rqpair->qpair.state, rqpair->ibv_state, wc->wr_id,
4046 			    nvmf_rdma_wr_type_str(wr_type), wc->status, ibv_wc_status_str(wc->status));
4047 	}
4048 }
4049 
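/* Poll the poller's completion queue once: reap up to 32 completions, advance the request
 * state machines, then flush any receive and send WRs queued during processing. */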
4050 static int
4051 nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
4052 		      struct spdk_nvmf_rdma_poller *rpoller)
4053 {
4054 	struct ibv_wc wc[32];
4055 	struct spdk_nvmf_rdma_wr	*rdma_wr;
4056 	struct spdk_nvmf_rdma_request	*rdma_req;
4057 	struct spdk_nvmf_rdma_recv	*rdma_recv;
4058 	struct spdk_nvmf_rdma_qpair	*rqpair;
4059 	int reaped, i;
4060 	int count = 0;
4061 	bool error = false;
4062 	uint64_t poll_tsc = spdk_get_ticks();
4063 
4064 	/* Poll for completing operations. */
4065 	reaped = ibv_poll_cq(rpoller->cq, 32, wc);
4066 	if (reaped < 0) {
4067 		SPDK_ERRLOG("Error polling CQ! (%d): %s\n",
4068 			    errno, spdk_strerror(errno));
4069 		return -1;
4070 	} else if (reaped == 0) {
4071 		rpoller->stat.idle_polls++;
4072 	}
4073 
4074 	rpoller->stat.polls++;
4075 	rpoller->stat.completions += reaped;
4076 
4077 	for (i = 0; i < reaped; i++) {
4078 
4079 		rdma_wr = (struct spdk_nvmf_rdma_wr *)wc[i].wr_id;
4080 
4081 		switch (rdma_wr->type) {
4082 		case RDMA_WR_TYPE_SEND:
4083 			rdma_req = SPDK_CONTAINEROF(rdma_wr, struct spdk_nvmf_rdma_request, rsp.rdma_wr);
4084 			rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
4085 
4086 			if (!wc[i].status) {
4087 				count++;
4088 				assert(wc[i].opcode == IBV_WC_SEND);
4089 				assert(nvmf_rdma_req_is_completing(rdma_req));
4090 			}
4091 
4092 			rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
4093 			/* Any chained RDMA_WRITE operations completed along with the response; +1 accounts for the rsp WR itself. */
4094 			rqpair->current_send_depth -= rdma_req->num_outstanding_data_wr + 1;
4095 			rdma_req->num_outstanding_data_wr = 0;
4096 
4097 			nvmf_rdma_request_process(rtransport, rdma_req);
4098 			break;
4099 		case RDMA_WR_TYPE_RECV:
4100 			/* rdma_recv->qpair will be invalid if using an SRQ.  In that case we have to get the qpair from the wc. */
4101 			rdma_recv = SPDK_CONTAINEROF(rdma_wr, struct spdk_nvmf_rdma_recv, rdma_wr);
4102 			if (rpoller->srq != NULL) {
4103 				rdma_recv->qpair = get_rdma_qpair_from_wc(rpoller, &wc[i]);
4104 				/* It is possible that there are still some completions for destroyed QP
4105 				 * associated with SRQ. We just ignore these late completions and re-post
4106 				 * receive WRs back to SRQ.
4107 				 */
4108 				if (spdk_unlikely(NULL == rdma_recv->qpair)) {
4109 					struct ibv_recv_wr *bad_wr;
4110 					int rc;
4111 
4112 					rdma_recv->wr.next = NULL;
4113 					spdk_rdma_srq_queue_recv_wrs(rpoller->srq, &rdma_recv->wr);
4114 					rc = spdk_rdma_srq_flush_recv_wrs(rpoller->srq, &bad_wr);
4115 					if (rc) {
4116 						SPDK_ERRLOG("Failed to re-post recv WR to SRQ, err %d\n", rc);
4117 					}
4118 					continue;
4119 				}
4120 			}
4121 			rqpair = rdma_recv->qpair;
4122 
4123 			assert(rqpair != NULL);
4124 			if (!wc[i].status) {
4125 				assert(wc[i].opcode == IBV_WC_RECV);
4126 				if (rqpair->current_recv_depth >= rqpair->max_queue_depth) {
4127 					spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
4128 					break;
4129 				}
4130 			}
4131 
4132 			rdma_recv->wr.next = NULL;
4133 			rqpair->current_recv_depth++;
4134 			rdma_recv->receive_tsc = poll_tsc;
4135 			rpoller->stat.requests++;
4136 			STAILQ_INSERT_HEAD(&rqpair->resources->incoming_queue, rdma_recv, link);
4137 			break;
4138 		case RDMA_WR_TYPE_DATA:
4139 			rdma_req = SPDK_CONTAINEROF(rdma_wr, struct spdk_nvmf_rdma_request, data.rdma_wr);
4140 			rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
4141 
4142 			assert(rdma_req->num_outstanding_data_wr > 0);
4143 
4144 			rqpair->current_send_depth--;
4145 			rdma_req->num_outstanding_data_wr--;
4146 			if (!wc[i].status) {
4147 				assert(wc[i].opcode == IBV_WC_RDMA_READ);
4148 				rqpair->current_read_depth--;
4149 				/* wait for all outstanding reads associated with the same rdma_req to complete before proceeding. */
4150 				if (rdma_req->num_outstanding_data_wr == 0) {
4151 					rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
4152 					nvmf_rdma_request_process(rtransport, rdma_req);
4153 				}
4154 			} else {
4155 				/* The data transfer failed; the queue pair will still be forced into the error
4156 				 * state below. If we were performing an RDMA_READ, we need to force the request
4157 				 * into the completed state since it wasn't linked to a SEND. In the RDMA_WRITE
4158 				 * case, however, we should wait for the chained SEND to complete. */
4159 				if (rdma_req->data.wr.opcode == IBV_WR_RDMA_READ) {
4160 					rqpair->current_read_depth--;
4161 					if (rdma_req->num_outstanding_data_wr == 0) {
4162 						rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
4163 					}
4164 				}
4165 			}
4166 			break;
4167 		default:
4168 			SPDK_ERRLOG("Received an unknown opcode on the CQ: %d\n", wc[i].opcode);
4169 			continue;
4170 		}
4171 
4172 		/* Handle error conditions */
4173 		if (wc[i].status) {
4174 			nvmf_rdma_update_ibv_state(rqpair);
4175 			nvmf_rdma_log_wc_status(rqpair, &wc[i]);
4176 
4177 			error = true;
4178 
4179 			if (rqpair->qpair.state == SPDK_NVMF_QPAIR_ACTIVE) {
4180 				/* Disconnect the connection. */
4181 				spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
4182 			} else {
4183 				nvmf_rdma_destroy_drained_qpair(rqpair);
4184 			}
4185 			continue;
4186 		}
4187 
4188 		nvmf_rdma_qpair_process_pending(rtransport, rqpair, false);
4189 
4190 		if (rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
4191 			nvmf_rdma_destroy_drained_qpair(rqpair);
4192 		}
4193 	}
4194 
4195 	if (error == true) {
4196 		return -1;
4197 	}
4198 
4199 	/* submit outstanding work requests. */
4200 	_poller_submit_recvs(rtransport, rpoller);
4201 	_poller_submit_sends(rtransport, rpoller);
4202 
4203 	return count;
4204 }
4205 
4206 static int
4207 nvmf_rdma_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
4208 {
4209 	struct spdk_nvmf_rdma_transport *rtransport;
4210 	struct spdk_nvmf_rdma_poll_group *rgroup;
4211 	struct spdk_nvmf_rdma_poller	*rpoller;
4212 	int				count, rc;
4213 
4214 	rtransport = SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_rdma_transport, transport);
4215 	rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);
4216 
4217 	count = 0;
4218 	TAILQ_FOREACH(rpoller, &rgroup->pollers, link) {
4219 		rc = nvmf_rdma_poller_poll(rtransport, rpoller);
4220 		if (rc < 0) {
4221 			return rc;
4222 		}
4223 		count += rc;
4224 	}
4225 
4226 	return count;
4227 }
4228 
4229 static int
4230 nvmf_rdma_trid_from_cm_id(struct rdma_cm_id *id,
4231 			  struct spdk_nvme_transport_id *trid,
4232 			  bool peer)
4233 {
4234 	struct sockaddr *saddr;
4235 	uint16_t port;
4236 
4237 	spdk_nvme_trid_populate_transport(trid, SPDK_NVME_TRANSPORT_RDMA);
4238 
4239 	if (peer) {
4240 		saddr = rdma_get_peer_addr(id);
4241 	} else {
4242 		saddr = rdma_get_local_addr(id);
4243 	}
4244 	switch (saddr->sa_family) {
4245 	case AF_INET: {
4246 		struct sockaddr_in *saddr_in = (struct sockaddr_in *)saddr;
4247 
4248 		trid->adrfam = SPDK_NVMF_ADRFAM_IPV4;
4249 		inet_ntop(AF_INET, &saddr_in->sin_addr,
4250 			  trid->traddr, sizeof(trid->traddr));
4251 		if (peer) {
4252 			port = ntohs(rdma_get_dst_port(id));
4253 		} else {
4254 			port = ntohs(rdma_get_src_port(id));
4255 		}
4256 		snprintf(trid->trsvcid, sizeof(trid->trsvcid), "%u", port);
4257 		break;
4258 	}
4259 	case AF_INET6: {
4260 		struct sockaddr_in6 *saddr_in = (struct sockaddr_in6 *)saddr;
4261 		trid->adrfam = SPDK_NVMF_ADRFAM_IPV6;
4262 		inet_ntop(AF_INET6, &saddr_in->sin6_addr,
4263 			  trid->traddr, sizeof(trid->traddr));
4264 		if (peer) {
4265 			port = ntohs(rdma_get_dst_port(id));
4266 		} else {
4267 			port = ntohs(rdma_get_src_port(id));
4268 		}
4269 		snprintf(trid->trsvcid, sizeof(trid->trsvcid), "%u", port);
4270 		break;
4271 	}
4272 	default:
4273 		return -1;
4274 
4275 	}
4276 
4277 	return 0;
4278 }
4279 
4280 static int
4281 nvmf_rdma_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
4282 			      struct spdk_nvme_transport_id *trid)
4283 {
4284 	struct spdk_nvmf_rdma_qpair	*rqpair;
4285 
4286 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
4287 
4288 	return nvmf_rdma_trid_from_cm_id(rqpair->cm_id, trid, true);
4289 }
4290 
4291 static int
4292 nvmf_rdma_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
4293 			       struct spdk_nvme_transport_id *trid)
4294 {
4295 	struct spdk_nvmf_rdma_qpair	*rqpair;
4296 
4297 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
4298 
4299 	return nvmf_rdma_trid_from_cm_id(rqpair->cm_id, trid, false);
4300 }
4301 
4302 static int
4303 nvmf_rdma_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
4304 				struct spdk_nvme_transport_id *trid)
4305 {
4306 	struct spdk_nvmf_rdma_qpair	*rqpair;
4307 
4308 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
4309 
4310 	return nvmf_rdma_trid_from_cm_id(rqpair->listen_id, trid, false);
4311 }
4312 
4313 void
4314 spdk_nvmf_rdma_init_hooks(struct spdk_nvme_rdma_hooks *hooks)
4315 {
4316 	g_nvmf_hooks = *hooks;
4317 }
4318 
4319 static void
4320 nvmf_rdma_request_set_abort_status(struct spdk_nvmf_request *req,
4321 				   struct spdk_nvmf_rdma_request *rdma_req_to_abort)
4322 {
4323 	rdma_req_to_abort->req.rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
4324 	rdma_req_to_abort->req.rsp->nvme_cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
4325 
4326 	rdma_req_to_abort->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
4327 
4328 	req->rsp->nvme_cpl.cdw0 &= ~1U;	/* Command was successfully aborted. */
4329 }
4330 
4331 static int
4332 _nvmf_rdma_qpair_abort_request(void *ctx)
4333 {
4334 	struct spdk_nvmf_request *req = ctx;
4335 	struct spdk_nvmf_rdma_request *rdma_req_to_abort = SPDK_CONTAINEROF(
4336 				req->req_to_abort, struct spdk_nvmf_rdma_request, req);
4337 	struct spdk_nvmf_rdma_qpair *rqpair = SPDK_CONTAINEROF(req->req_to_abort->qpair,
4338 					      struct spdk_nvmf_rdma_qpair, qpair);
4339 	int rc;
4340 
4341 	spdk_poller_unregister(&req->poller);
4342 
4343 	switch (rdma_req_to_abort->state) {
4344 	case RDMA_REQUEST_STATE_EXECUTING:
4345 		rc = nvmf_ctrlr_abort_request(req);
4346 		if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS) {
4347 			return SPDK_POLLER_BUSY;
4348 		}
4349 		break;
4350 
4351 	case RDMA_REQUEST_STATE_NEED_BUFFER:
4352 		STAILQ_REMOVE(&rqpair->poller->group->group.pending_buf_queue,
4353 			      &rdma_req_to_abort->req, spdk_nvmf_request, buf_link);
4354 
4355 		nvmf_rdma_request_set_abort_status(req, rdma_req_to_abort);
4356 		break;
4357 
4358 	case RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING:
4359 		STAILQ_REMOVE(&rqpair->pending_rdma_read_queue, rdma_req_to_abort,
4360 			      spdk_nvmf_rdma_request, state_link);
4361 
4362 		nvmf_rdma_request_set_abort_status(req, rdma_req_to_abort);
4363 		break;
4364 
4365 	case RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING:
4366 		STAILQ_REMOVE(&rqpair->pending_rdma_write_queue, rdma_req_to_abort,
4367 			      spdk_nvmf_rdma_request, state_link);
4368 
4369 		nvmf_rdma_request_set_abort_status(req, rdma_req_to_abort);
4370 		break;
4371 
4372 	case RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER:
4373 		if (spdk_get_ticks() < req->timeout_tsc) {
4374 			req->poller = SPDK_POLLER_REGISTER(_nvmf_rdma_qpair_abort_request, req, 0);
4375 			return SPDK_POLLER_BUSY;
4376 		}
4377 		break;
4378 
4379 	default:
4380 		break;
4381 	}
4382 
4383 	spdk_nvmf_request_complete(req);
4384 	return SPDK_POLLER_BUSY;
4385 }
4386 
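/* Locate the request matching the CID from the Abort command on this qpair and try to abort
 * it, retrying until the transport's abort timeout expires. */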
4387 static void
4388 nvmf_rdma_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
4389 			      struct spdk_nvmf_request *req)
4390 {
4391 	struct spdk_nvmf_rdma_qpair *rqpair;
4392 	struct spdk_nvmf_rdma_transport *rtransport;
4393 	struct spdk_nvmf_transport *transport;
4394 	uint16_t cid;
4395 	uint32_t i, max_req_count;
4396 	struct spdk_nvmf_rdma_request *rdma_req_to_abort = NULL, *rdma_req;
4397 
4398 	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
4399 	rtransport = SPDK_CONTAINEROF(qpair->transport, struct spdk_nvmf_rdma_transport, transport);
4400 	transport = &rtransport->transport;
4401 
4402 	cid = req->cmd->nvme_cmd.cdw10_bits.abort.cid;
4403 	max_req_count = rqpair->srq == NULL ? rqpair->max_queue_depth : rqpair->poller->max_srq_depth;
4404 
4405 	for (i = 0; i < max_req_count; i++) {
4406 		rdma_req = &rqpair->resources->reqs[i];
4407 		/* When SRQ == NULL, the rqpair has its own requests and the req.qpair pointer always points to this qpair.
4408 		 * When SRQ != NULL, all rqpairs share a common request pool and the qpair pointer is assigned when we start
4409 		 * to process a request. So in both cases, all requests that are not in the FREE state have a valid qpair pointer. */
4410 		if (rdma_req->state != RDMA_REQUEST_STATE_FREE && rdma_req->req.cmd->nvme_cmd.cid == cid &&
4411 		    rdma_req->req.qpair == qpair) {
4412 			rdma_req_to_abort = rdma_req;
4413 			break;
4414 		}
4415 	}
4416 
4417 	if (rdma_req_to_abort == NULL) {
4418 		spdk_nvmf_request_complete(req);
4419 		return;
4420 	}
4421 
4422 	req->req_to_abort = &rdma_req_to_abort->req;
4423 	req->timeout_tsc = spdk_get_ticks() +
4424 			   transport->opts.abort_timeout_sec * spdk_get_ticks_hz();
4425 	req->poller = NULL;
4426 
4427 	_nvmf_rdma_qpair_abort_request(req);
4428 }
4429 
4430 static void
4431 nvmf_rdma_poll_group_dump_stat(struct spdk_nvmf_transport_poll_group *group,
4432 			       struct spdk_json_write_ctx *w)
4433 {
4434 	struct spdk_nvmf_rdma_poll_group *rgroup;
4435 	struct spdk_nvmf_rdma_poller *rpoller;
4436 
4437 	assert(w != NULL);
4438 
4439 	rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);
4440 
4441 	spdk_json_write_named_uint64(w, "pending_data_buffer", rgroup->stat.pending_data_buffer);
4442 
4443 	spdk_json_write_named_array_begin(w, "devices");
4444 
4445 	TAILQ_FOREACH(rpoller, &rgroup->pollers, link) {
4446 		spdk_json_write_object_begin(w);
4447 		spdk_json_write_named_string(w, "name",
4448 					     ibv_get_device_name(rpoller->device->context->device));
4449 		spdk_json_write_named_uint64(w, "polls",
4450 					     rpoller->stat.polls);
4451 		spdk_json_write_named_uint64(w, "idle_polls",
4452 					     rpoller->stat.idle_polls);
4453 		spdk_json_write_named_uint64(w, "completions",
4454 					     rpoller->stat.completions);
4455 		spdk_json_write_named_uint64(w, "requests",
4456 					     rpoller->stat.requests);
4457 		spdk_json_write_named_uint64(w, "request_latency",
4458 					     rpoller->stat.request_latency);
4459 		spdk_json_write_named_uint64(w, "pending_free_request",
4460 					     rpoller->stat.pending_free_request);
4461 		spdk_json_write_named_uint64(w, "pending_rdma_read",
4462 					     rpoller->stat.pending_rdma_read);
4463 		spdk_json_write_named_uint64(w, "pending_rdma_write",
4464 					     rpoller->stat.pending_rdma_write);
4465 		spdk_json_write_named_uint64(w, "total_send_wrs",
4466 					     rpoller->stat.qp_stats.send.num_submitted_wrs);
4467 		spdk_json_write_named_uint64(w, "send_doorbell_updates",
4468 					     rpoller->stat.qp_stats.send.doorbell_updates);
4469 		spdk_json_write_named_uint64(w, "total_recv_wrs",
4470 					     rpoller->stat.qp_stats.recv.num_submitted_wrs);
4471 		spdk_json_write_named_uint64(w, "recv_doorbell_updates",
4472 					     rpoller->stat.qp_stats.recv.doorbell_updates);
4473 		spdk_json_write_object_end(w);
4474 	}
4475 
4476 	spdk_json_write_array_end(w);
4477 }
4478 
4479 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
4480 	.name = "RDMA",
4481 	.type = SPDK_NVME_TRANSPORT_RDMA,
4482 	.opts_init = nvmf_rdma_opts_init,
4483 	.create = nvmf_rdma_create,
4484 	.dump_opts = nvmf_rdma_dump_opts,
4485 	.destroy = nvmf_rdma_destroy,
4486 
4487 	.listen = nvmf_rdma_listen,
4488 	.stop_listen = nvmf_rdma_stop_listen,
4489 	.cdata_init = nvmf_rdma_cdata_init,
4490 
4491 	.listener_discover = nvmf_rdma_discover,
4492 
4493 	.poll_group_create = nvmf_rdma_poll_group_create,
4494 	.get_optimal_poll_group = nvmf_rdma_get_optimal_poll_group,
4495 	.poll_group_destroy = nvmf_rdma_poll_group_destroy,
4496 	.poll_group_add = nvmf_rdma_poll_group_add,
4497 	.poll_group_remove = nvmf_rdma_poll_group_remove,
4498 	.poll_group_poll = nvmf_rdma_poll_group_poll,
4499 
4500 	.req_free = nvmf_rdma_request_free,
4501 	.req_complete = nvmf_rdma_request_complete,
4502 
4503 	.qpair_fini = nvmf_rdma_close_qpair,
4504 	.qpair_get_peer_trid = nvmf_rdma_qpair_get_peer_trid,
4505 	.qpair_get_local_trid = nvmf_rdma_qpair_get_local_trid,
4506 	.qpair_get_listen_trid = nvmf_rdma_qpair_get_listen_trid,
4507 	.qpair_abort_request = nvmf_rdma_qpair_abort_request,
4508 
4509 	.poll_group_dump_stat = nvmf_rdma_poll_group_dump_stat,
4510 };
4511 
4512 SPDK_NVMF_TRANSPORT_REGISTER(rdma, &spdk_nvmf_transport_rdma);
4513 SPDK_LOG_REGISTER_COMPONENT(rdma)
4514