1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2018 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2019, 2021 Mellanox Technologies LTD. All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 #include "spdk_cunit.h"
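/*
 * The transport sources and the shared test mocks are compiled directly into this
 * test binary so that file-local functions (e.g. nvmf_rdma_request_parse_sgl and
 * nvmf_rdma_request_process) can be called directly, and so that the env and verbs
 * layers are replaced by the stubs in common/lib/test_env.c and common/lib/test_rdma.c.
 */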
8 #include "common/lib/test_env.c"
9 #include "common/lib/test_rdma.c"
10 #include "nvmf/rdma.c"
11 #include "nvmf/transport.c"
12 
13 #define RDMA_UT_UNITS_IN_MAX_IO 16
14 
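/*
 * Transport options shared by the SGL parsing tests below. max_io_size is expressed
 * as RDMA_UT_UNITS_IN_MAX_IO (16) I/O units, so a maximum-sized keyed SGL is expected
 * to consume 16 data buffers and 16 SGEs (see Part 2 of the parse_sgl test).
 */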
15 struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
16 	.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
17 	.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
18 	.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
19 	.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
20 	.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
21 	.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
22 	.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
23 };
24 
25 SPDK_LOG_REGISTER_COMPONENT(nvmf)
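/*
 * Stub out the external dependencies of the RDMA transport. Each DEFINE_STUB()
 * provides a function that returns the given default value, so only the transport
 * logic itself is exercised here.
 */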
26 DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
27 		uint64_t size, uint64_t translation), 0);
28 DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
29 		uint64_t size), 0);
30 DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
31 		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
32 DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
33 		nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
34 DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int,
35 	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid), 0);
36 DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));
37 
38 DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
39 		struct spdk_nvmf_ctrlr_data *cdata));
40 DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
41 DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
42 DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
43 		const struct spdk_nvme_transport_id *trid2), 0);
44 DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
45 DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
46 		struct spdk_dif_ctx *dif_ctx), false);
47 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
48 		enum spdk_nvme_transport_type trtype));
49 DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
50 DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);
51 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
52 DEFINE_STUB(ibv_dereg_mr, int, (struct ibv_mr *mr), 0);
53 DEFINE_STUB(ibv_resize_cq, int, (struct ibv_cq *cq, int cqe), 0);
54 DEFINE_STUB(spdk_mempool_lookup, struct spdk_mempool *, (const char *name), NULL);
55 
56 /* ibv_reg_mr can be defined as a macro, so undefine it before providing our mock. */
57 #ifdef ibv_reg_mr
58 #undef ibv_reg_mr
59 #endif
60 
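/*
 * Minimal ibv_reg_mr() replacement: unless a return value is forced through the
 * mock framework, any registration with a non-zero length succeeds and returns
 * the g_rdma_mr shared with the RDMA test mocks.
 */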
61 DEFINE_RETURN_MOCK(ibv_reg_mr, struct ibv_mr *);
62 struct ibv_mr *
63 ibv_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
64 {
65 	HANDLE_RETURN_MOCK(ibv_reg_mr);
66 	if (length > 0) {
67 		return &g_rdma_mr;
68 	} else {
69 		return NULL;
70 	}
71 }
72 
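/*
 * ibv_query_qp() replacement: reports a fixed port number and maps a qp in the
 * ERR state to an out-of-range state value (10) so that the state-update error
 * handling can be exercised (see test_nvmf_rdma_update_ibv_state).
 */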
73 int
74 ibv_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
75 	     int attr_mask, struct ibv_qp_init_attr *init_attr)
76 {
77 	if (qp == NULL) {
78 		return -1;
79 	} else {
80 		attr->port_num = 80;
81 
82 		if (qp->state == IBV_QPS_ERR) {
83 			attr->qp_state = 10;
84 		} else {
85 			attr->qp_state = IBV_QPS_INIT;
86 		}
87 
88 		return 0;
89 	}
90 }
91 
92 const char *
93 spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
94 {
95 	switch (trtype) {
96 	case SPDK_NVME_TRANSPORT_PCIE:
97 		return "PCIe";
98 	case SPDK_NVME_TRANSPORT_RDMA:
99 		return "RDMA";
100 	case SPDK_NVME_TRANSPORT_FC:
101 		return "FC";
102 	default:
103 		return NULL;
104 	}
105 }
106 
107 int
108 spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
109 {
110 	int len, i;
111 
112 	if (trstring == NULL) {
113 		return -EINVAL;
114 	}
115 
116 	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
117 	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
118 		return -EINVAL;
119 	}
120 
121 	/* Copy the input trstring into the official trstring field, converting it to uppercase. */
122 	for (i = 0; i < len; i++) {
123 		trid->trstring[i] = toupper(trstring[i]);
124 	}
125 	return 0;
126 }
127 
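/* Clear every per-request field that SGL parsing fills in, so that each test part
 * below starts from a pristine request. */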
128 static void
129 reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
130 {
131 	int i;
132 
133 	rdma_req->req.length = 0;
134 	rdma_req->req.data_from_pool = false;
135 	rdma_req->data.wr.num_sge = 0;
136 	rdma_req->data.wr.wr.rdma.remote_addr = 0;
137 	rdma_req->data.wr.wr.rdma.rkey = 0;
138 	rdma_req->offset = 0;
139 	memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));
140 
141 	for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
142 		rdma_req->req.iov[i].iov_base = 0;
143 		rdma_req->req.iov[i].iov_len = 0;
144 		rdma_req->req.buffers[i] = 0;
145 		rdma_req->data.wr.sg_list[i].addr = 0;
146 		rdma_req->data.wr.sg_list[i].length = 0;
147 		rdma_req->data.wr.sg_list[i].lkey = 0;
148 	}
149 	rdma_req->req.iovcnt = 0;
150 	if (rdma_req->req.stripped_data) {
151 		free(rdma_req->req.stripped_data);
152 		rdma_req->req.stripped_data = NULL;
153 	}
154 }
155 
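/*
 * Exercise nvmf_rdma_request_parse_sgl() for keyed SGLs, in-capsule data, multi-SGL
 * segments and the poll-group buffer cache. spdk_mempool_get is mocked throughout,
 * so the "buffers" handed out are well-known fake addresses.
 */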
156 static void
157 test_spdk_nvmf_rdma_request_parse_sgl(void)
158 {
159 	struct spdk_nvmf_rdma_transport rtransport;
160 	struct spdk_nvmf_rdma_device device;
161 	struct spdk_nvmf_rdma_request rdma_req = {};
162 	struct spdk_nvmf_rdma_recv recv;
163 	struct spdk_nvmf_rdma_poll_group group;
164 	struct spdk_nvmf_rdma_qpair rqpair;
165 	struct spdk_nvmf_rdma_poller poller;
166 	union nvmf_c2h_msg cpl;
167 	union nvmf_h2c_msg cmd;
168 	struct spdk_nvme_sgl_descriptor *sgl;
169 	struct spdk_nvmf_transport_pg_cache_buf bufs[4];
170 	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
171 	struct spdk_nvmf_rdma_request_data data;
172 	int rc, i;
173 	uint32_t sgl_length;
174 	uintptr_t aligned_buffer_address;
175 
176 	data.wr.sg_list = data.sgl;
177 	STAILQ_INIT(&group.group.buf_cache);
178 	group.group.buf_cache_size = 0;
179 	group.group.buf_cache_count = 0;
180 	group.group.transport = &rtransport.transport;
181 	poller.group = &group;
182 	rqpair.poller = &poller;
183 	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
184 
185 	sgl = &cmd.nvme_cmd.dptr.sgl1;
186 	rdma_req.recv = &recv;
187 	rdma_req.req.cmd = &cmd;
188 	rdma_req.req.rsp = &cpl;
189 	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
190 	rdma_req.req.qpair = &rqpair.qpair;
191 	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
192 
193 	rtransport.transport.opts = g_rdma_ut_transport_opts;
194 	rtransport.data_wr_pool = NULL;
195 	rtransport.transport.data_buf_pool = NULL;
196 
197 	device.attr.device_cap_flags = 0;
198 	sgl->keyed.key = 0xEEEE;
199 	sgl->address = 0xFFFF;
200 	rdma_req.recv->buf = (void *)0xDDDD;
201 
202 	/* Test 1: sgl type: keyed data block, subtype: address */
203 	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
204 	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
205 
206 	/* Part 1: simple I/O, one SGL smaller than the transport io unit size */
207 	MOCK_SET(spdk_mempool_get, (void *)0x2000);
208 	reset_nvmf_rdma_request(&rdma_req);
209 	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;
210 
211 	device.map = (void *)0x0;
212 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
213 	CU_ASSERT(rc == 0);
214 	CU_ASSERT(rdma_req.req.data_from_pool == true);
215 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
216 	CU_ASSERT((uint64_t)rdma_req.req.iovcnt == 1);
217 	CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
218 	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
219 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
220 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
221 	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
222 	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
223 	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
224 	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
225 
226 	/* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
227 	reset_nvmf_rdma_request(&rdma_req);
228 	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
229 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
230 
231 	CU_ASSERT(rc == 0);
232 	CU_ASSERT(rdma_req.req.data_from_pool == true);
233 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
234 	CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
235 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
236 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
237 	for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
238 		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
239 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
240 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
241 		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
242 	}
243 
244 	/* Part 3: simple I/O, one SGL larger than the transport max io size */
245 	reset_nvmf_rdma_request(&rdma_req);
246 	sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
247 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
248 
249 	CU_ASSERT(rc == -1);
250 
251 	/* Part 4: Pretend there are no buffer pools */
252 	MOCK_SET(spdk_mempool_get, NULL);
253 	reset_nvmf_rdma_request(&rdma_req);
254 	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
255 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
256 
257 	CU_ASSERT(rc == 0);
258 	CU_ASSERT(rdma_req.req.data_from_pool == false);
259 	CU_ASSERT(rdma_req.req.iovcnt == 0);
260 	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
261 	CU_ASSERT(rdma_req.req.buffers[0] == NULL);
262 	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
263 	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
264 	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);
265 
266 	rdma_req.recv->buf = (void *)0xDDDD;
267 	/* Test 2: sgl type: data block (unkeyed), subtype: offset (in-capsule data) */
268 	sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
269 	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
270 
271 	/* Part 1: Normal I/O no larger than the in-capsule data size, no offset */
272 	reset_nvmf_rdma_request(&rdma_req);
273 	sgl->address = 0;
274 	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
275 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
276 
277 	CU_ASSERT(rc == 0);
278 	CU_ASSERT(rdma_req.req.iovcnt == 1);
279 	CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)0xDDDD);
280 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
281 	CU_ASSERT(rdma_req.req.data_from_pool == false);
282 
283 	/* Part 2: I/O offset + length too large */
284 	reset_nvmf_rdma_request(&rdma_req);
285 	sgl->address = rtransport.transport.opts.in_capsule_data_size;
286 	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
287 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
288 
289 	CU_ASSERT(rc == -1);
290 
291 	/* Part 3: I/O too large */
292 	reset_nvmf_rdma_request(&rdma_req);
293 	sgl->address = 0;
294 	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
295 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
296 
297 	CU_ASSERT(rc == -1);
298 
299 	/* Test 3: Multi SGL */
300 	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
301 	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
302 	sgl->address = 0;
303 	rdma_req.recv->buf = (void *)&sgl_desc;
304 	MOCK_SET(spdk_mempool_get, &data);
305 
306 	/* part 1: 2 segments each with 1 wr. */
307 	reset_nvmf_rdma_request(&rdma_req);
308 	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
309 	for (i = 0; i < 2; i++) {
310 		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
311 		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
312 		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
313 		sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
314 		sgl_desc[i].keyed.key = 0x44;
315 	}
316 
317 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
318 
319 	CU_ASSERT(rc == 0);
320 	CU_ASSERT(rdma_req.req.data_from_pool == true);
321 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
322 	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
323 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
324 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
325 	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
326 	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
327 	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
328 	CU_ASSERT(data.wr.num_sge == 1);
329 	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
330 
331 	/* part 2: 2 segments, each with 1 wr containing 8 sge elements */
332 	reset_nvmf_rdma_request(&rdma_req);
333 	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
334 	for (i = 0; i < 2; i++) {
335 		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
336 		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
337 		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
338 		sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
339 		sgl_desc[i].keyed.key = 0x44;
340 	}
341 
342 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
343 
344 	CU_ASSERT(rc == 0);
345 	CU_ASSERT(rdma_req.req.data_from_pool == true);
346 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
347 	CU_ASSERT(rdma_req.req.iovcnt == 16);
348 	CU_ASSERT(rdma_req.data.wr.num_sge == 8);
349 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
350 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
351 	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
352 	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
353 	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
354 	CU_ASSERT(data.wr.num_sge == 8);
355 	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
356 
357 	/* part 3: 2 segments, one very large, one very small */
358 	reset_nvmf_rdma_request(&rdma_req);
359 	for (i = 0; i < 2; i++) {
360 		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
361 		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
362 		sgl_desc[i].keyed.key = 0x44;
363 	}
364 
365 	sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
366 				   rtransport.transport.opts.io_unit_size / 2;
367 	sgl_desc[0].address = 0x4000;
368 	sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
369 	sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
370 			      rtransport.transport.opts.io_unit_size / 2;
371 
372 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
373 
374 	CU_ASSERT(rc == 0);
375 	CU_ASSERT(rdma_req.req.data_from_pool == true);
376 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
377 	CU_ASSERT(rdma_req.req.iovcnt == 16);
378 	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
379 	for (i = 0; i < 15; i++) {
380 		CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
381 	}
382 	CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
383 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
384 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
385 	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
386 	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
387 	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
388 		  rtransport.transport.opts.io_unit_size / 2);
389 	CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
390 	CU_ASSERT(data.wr.num_sge == 1);
391 	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
392 
393 	/* part 4: 2 SGL descriptors, each with a length of half a transport buffer;
394 	 * 1 transport buffer should be allocated */
395 	reset_nvmf_rdma_request(&rdma_req);
396 	aligned_buffer_address = ((uintptr_t)(&data) + NVMF_DATA_BUFFER_MASK) & ~NVMF_DATA_BUFFER_MASK;
397 	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
398 	sgl_length = rtransport.transport.opts.io_unit_size / 2;
399 	for (i = 0; i < 2; i++) {
400 		sgl_desc[i].keyed.length = sgl_length;
401 		sgl_desc[i].address = 0x4000 + i * sgl_length;
402 	}
403 
404 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
405 
406 	CU_ASSERT(rc == 0);
407 	CU_ASSERT(rdma_req.req.data_from_pool == true);
408 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size);
409 	CU_ASSERT(rdma_req.req.iovcnt == 1);
410 
411 	CU_ASSERT(rdma_req.data.sgl[0].length == sgl_length);
412 	/* We mocked spdk_mempool_get to return the address of the data variable. The mempool
413 	 * is used for both additional WRs and data buffers, so the data buffer resolves to &data */
414 	CU_ASSERT(rdma_req.data.sgl[0].addr == aligned_buffer_address);
415 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
416 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
417 	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
418 	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
419 
420 	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
421 	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + sgl_length);
422 	CU_ASSERT(data.sgl[0].length == sgl_length);
423 	CU_ASSERT(data.sgl[0].addr == aligned_buffer_address + sgl_length);
424 	CU_ASSERT(data.wr.num_sge == 1);
425 
426 	/* Test 4: use PG buffer cache */
427 	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
428 	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
429 	sgl->address = 0xFFFF;
430 	rdma_req.recv->buf = (void *)0xDDDD;
431 	sgl->keyed.key = 0xEEEE;
432 
433 	for (i = 0; i < 4; i++) {
434 		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
435 	}
436 
437 	/* part 1: use the four buffers from the pg cache */
438 	group.group.buf_cache_size = 4;
439 	group.group.buf_cache_count = 4;
440 	MOCK_SET(spdk_mempool_get, (void *)0x2000);
441 	reset_nvmf_rdma_request(&rdma_req);
442 	sgl->keyed.length = rtransport.transport.opts.io_unit_size * 4;
443 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
444 
445 	SPDK_CU_ASSERT_FATAL(rc == 0);
446 	CU_ASSERT(rdma_req.req.data_from_pool == true);
447 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
448 	CU_ASSERT(rdma_req.req.iovcnt == 4);
449 	CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
450 			~NVMF_DATA_BUFFER_MASK));
451 	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
452 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
453 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
454 	CU_ASSERT(group.group.buf_cache_count == 0);
455 	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
456 	for (i = 0; i < 4; i++) {
457 		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
458 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
459 				~NVMF_DATA_BUFFER_MASK));
460 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
461 	}
462 
463 	/* part 2: now that we have used the buffers from the cache, try again. We should get mempool buffers. */
464 	reset_nvmf_rdma_request(&rdma_req);
465 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
466 
467 	SPDK_CU_ASSERT_FATAL(rc == 0);
468 	CU_ASSERT(rdma_req.req.data_from_pool == true);
469 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
470 	CU_ASSERT(rdma_req.req.iovcnt == 4);
471 	CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
472 	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
473 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
474 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
475 	CU_ASSERT(group.group.buf_cache_count == 0);
476 	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
477 	for (i = 0; i < 4; i++) {
478 		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
479 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
480 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
481 		CU_ASSERT(group.group.buf_cache_count == 0);
482 	}
483 
484 	/* part 3: half from the cache, half from the mempool */
485 	group.group.buf_cache_count = 2;
486 
487 	for (i = 0; i < 2; i++) {
488 		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
489 	}
490 	reset_nvmf_rdma_request(&rdma_req);
491 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
492 
493 	SPDK_CU_ASSERT_FATAL(rc == 0);
494 	CU_ASSERT(rdma_req.req.data_from_pool == true);
495 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
496 	CU_ASSERT(rdma_req.req.iovcnt == 4);
497 	CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
498 			~NVMF_DATA_BUFFER_MASK));
499 	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
500 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
501 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
502 	CU_ASSERT(group.group.buf_cache_count == 0);
503 	for (i = 0; i < 2; i++) {
504 		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
505 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
506 				~NVMF_DATA_BUFFER_MASK));
507 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
508 	}
509 	for (i = 2; i < 4; i++) {
510 		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
511 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
512 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
513 	}
514 
515 	reset_nvmf_rdma_request(&rdma_req);
516 }
517 
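/* Helpers that allocate a minimal recv/request pair carrying a 1-byte keyed SGL,
 * used by the request state-machine tests below. */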
518 static struct spdk_nvmf_rdma_recv *
519 create_recv(struct spdk_nvmf_rdma_qpair *rqpair, enum spdk_nvme_nvm_opcode opc)
520 {
521 	struct spdk_nvmf_rdma_recv *rdma_recv;
522 	union nvmf_h2c_msg *cmd;
523 	struct spdk_nvme_sgl_descriptor *sgl;
524 
525 	rdma_recv = calloc(1, sizeof(*rdma_recv));
526 	rdma_recv->qpair = rqpair;
527 	cmd = calloc(1, sizeof(*cmd));
528 	rdma_recv->sgl[0].addr = (uintptr_t)cmd;
529 	cmd->nvme_cmd.opc = opc;
530 	sgl = &cmd->nvme_cmd.dptr.sgl1;
531 	sgl->keyed.key = 0xEEEE;
532 	sgl->address = 0xFFFF;
533 	sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
534 	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
535 	sgl->keyed.length = 1;
536 
537 	return rdma_recv;
538 }
539 
540 static void
541 free_recv(struct spdk_nvmf_rdma_recv *rdma_recv)
542 {
543 	free((void *)rdma_recv->sgl[0].addr);
544 	free(rdma_recv);
545 }
546 
547 static struct spdk_nvmf_rdma_request *
548 create_req(struct spdk_nvmf_rdma_qpair *rqpair,
549 	   struct spdk_nvmf_rdma_recv *rdma_recv)
550 {
551 	struct spdk_nvmf_rdma_request *rdma_req;
552 	union nvmf_c2h_msg *cpl;
553 
554 	rdma_req = calloc(1, sizeof(*rdma_req));
555 	rdma_req->recv = rdma_recv;
556 	rdma_req->req.qpair = &rqpair->qpair;
557 	rdma_req->state = RDMA_REQUEST_STATE_NEW;
558 	rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
559 	rdma_req->data.wr.sg_list = rdma_req->data.sgl;
560 	cpl = calloc(1, sizeof(*cpl));
561 	rdma_req->rsp.sgl[0].addr = (uintptr_t)cpl;
562 	rdma_req->req.rsp = cpl;
563 
564 	return rdma_req;
565 }
566 
567 static void
568 free_req(struct spdk_nvmf_rdma_request *rdma_req)
569 {
570 	free((void *)rdma_req->rsp.sgl[0].addr);
571 	free(rdma_req);
572 }
573 
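/* Re-initialize the fake qpair (and, below, the poller) between sub-tests: an active
 * qpair in the RTS state with enough send/read depth (16) that requests are not
 * throttled by queue limits. */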
574 static void
575 qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
576 	    struct spdk_nvmf_rdma_poller *poller,
577 	    struct spdk_nvmf_rdma_device *device,
578 	    struct spdk_nvmf_rdma_resources *resources,
579 	    struct spdk_nvmf_transport *transport)
580 {
581 	memset(rqpair, 0, sizeof(*rqpair));
582 	STAILQ_INIT(&rqpair->pending_rdma_write_queue);
583 	STAILQ_INIT(&rqpair->pending_rdma_read_queue);
584 	rqpair->poller = poller;
585 	rqpair->device = device;
586 	rqpair->resources = resources;
587 	rqpair->qpair.qid = 1;
588 	rqpair->ibv_state = IBV_QPS_RTS;
589 	rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
590 	rqpair->max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
591 	rqpair->max_send_depth = 16;
592 	rqpair->max_read_depth = 16;
593 	rqpair->qpair.transport = transport;
594 }
595 
596 static void
597 poller_reset(struct spdk_nvmf_rdma_poller *poller,
598 	     struct spdk_nvmf_rdma_poll_group *group)
599 {
600 	memset(poller, 0, sizeof(*poller));
601 	STAILQ_INIT(&poller->qpairs_pending_recv);
602 	STAILQ_INIT(&poller->qpairs_pending_send);
603 	poller->group = group;
604 }
605 
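/*
 * Walk nvmf_rdma_request_process() through the request state machine for READ,
 * WRITE, batched WRITE+WRITE and invalid-opcode requests, checking the resulting
 * state after each call.
 */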
606 static void
607 test_spdk_nvmf_rdma_request_process(void)
608 {
609 	struct spdk_nvmf_rdma_transport rtransport = {};
610 	struct spdk_nvmf_rdma_poll_group group = {};
611 	struct spdk_nvmf_rdma_poller poller = {};
612 	struct spdk_nvmf_rdma_device device = {};
613 	struct spdk_nvmf_rdma_resources resources = {};
614 	struct spdk_nvmf_rdma_qpair rqpair = {};
615 	struct spdk_nvmf_rdma_recv *rdma_recv;
616 	struct spdk_nvmf_rdma_request *rdma_req;
617 	bool progress;
618 
619 	STAILQ_INIT(&group.group.buf_cache);
620 	STAILQ_INIT(&group.group.pending_buf_queue);
621 	group.group.buf_cache_size = 0;
622 	group.group.buf_cache_count = 0;
623 	poller_reset(&poller, &group);
624 	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
625 
626 	rtransport.transport.opts = g_rdma_ut_transport_opts;
627 	rtransport.transport.data_buf_pool = spdk_mempool_create("test_data_pool", 16, 128, 0, 0);
628 	rtransport.data_wr_pool = spdk_mempool_create("test_wr_pool", 128,
629 				  sizeof(struct spdk_nvmf_rdma_request_data),
630 				  0, 0);
631 	MOCK_CLEAR(spdk_mempool_get);
632 
633 	device.attr.device_cap_flags = 0;
634 	device.map = (void *)0x0;
635 
636 	/* Test 1: single SGL READ request */
637 	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_READ);
638 	rdma_req = create_req(&rqpair, rdma_recv);
639 	rqpair.current_recv_depth = 1;
640 	/* NEW -> EXECUTING */
641 	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
642 	CU_ASSERT(progress == true);
643 	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
644 	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
645 	/* EXECUTED -> TRANSFERRING_C2H */
646 	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
647 	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
648 	CU_ASSERT(progress == true);
649 	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
650 	CU_ASSERT(rdma_req->recv == NULL);
651 	/* COMPLETED -> FREE */
652 	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
653 	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
654 	CU_ASSERT(progress == true);
655 	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);
656 
657 	free_recv(rdma_recv);
658 	free_req(rdma_req);
659 	poller_reset(&poller, &group);
660 	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
661 
662 	/* Test 2: single SGL WRITE request */
663 	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
664 	rdma_req = create_req(&rqpair, rdma_recv);
665 	rqpair.current_recv_depth = 1;
666 	/* NEW -> TRANSFERRING_H2C */
667 	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
668 	CU_ASSERT(progress == true);
669 	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
670 	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
671 	STAILQ_INIT(&poller.qpairs_pending_send);
672 	/* READY_TO_EXECUTE -> EXECUTING */
673 	rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
674 	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
675 	CU_ASSERT(progress == true);
676 	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
677 	/* EXECUTED -> COMPLETING */
678 	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
679 	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
680 	CU_ASSERT(progress == true);
681 	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
682 	CU_ASSERT(rdma_req->recv == NULL);
683 	/* COMPLETED -> FREE */
684 	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
685 	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
686 	CU_ASSERT(progress == true);
687 	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);
688 
689 	free_recv(rdma_recv);
690 	free_req(rdma_req);
691 	poller_reset(&poller, &group);
692 	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
693 
694 	/* Test 3: WRITE+WRITE ibv_send batching */
695 	{
696 		struct spdk_nvmf_rdma_recv *recv1, *recv2;
697 		struct spdk_nvmf_rdma_request *req1, *req2;
698 		recv1 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
699 		req1 = create_req(&rqpair, recv1);
700 		recv2 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
701 		req2 = create_req(&rqpair, recv2);
702 
703 		/* WRITE 1: NEW -> TRANSFERRING_H2C */
704 		rqpair.current_recv_depth = 1;
705 		nvmf_rdma_request_process(&rtransport, req1);
706 		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
707 
708 		/* WRITE 2: NEW -> TRANSFERRING_H2C */
709 		rqpair.current_recv_depth = 2;
710 		nvmf_rdma_request_process(&rtransport, req2);
711 		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
712 
713 		STAILQ_INIT(&poller.qpairs_pending_send);
714 
715 		/* WRITE 1 completes before WRITE 2 has finished RDMA reading */
716 		/* WRITE 1: READY_TO_EXECUTE -> EXECUTING */
717 		req1->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
718 		nvmf_rdma_request_process(&rtransport, req1);
719 		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_EXECUTING);
720 		/* WRITE 1: EXECUTED -> COMPLETING */
721 		req1->state = RDMA_REQUEST_STATE_EXECUTED;
722 		nvmf_rdma_request_process(&rtransport, req1);
723 		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
724 		STAILQ_INIT(&poller.qpairs_pending_send);
725 		/* WRITE 1: COMPLETED -> FREE */
726 		req1->state = RDMA_REQUEST_STATE_COMPLETED;
727 		nvmf_rdma_request_process(&rtransport, req1);
728 		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_FREE);
729 
730 		/* Now WRITE 2 has finished reading and completes */
732 		/* WRITE 2: READY_TO_EXECUTE -> EXECUTING */
733 		req2->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
734 		nvmf_rdma_request_process(&rtransport, req2);
735 		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_EXECUTING);
736 		/* WRITE 2: EXECUTED -> COMPLETING */
737 		req2->state = RDMA_REQUEST_STATE_EXECUTED;
738 		nvmf_rdma_request_process(&rtransport, req2);
739 		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
740 		STAILQ_INIT(&poller.qpairs_pending_send);
741 		/* WRITE 2: COMPLETED -> FREE */
742 		req2->state = RDMA_REQUEST_STATE_COMPLETED;
743 		nvmf_rdma_request_process(&rtransport, req2);
744 		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_FREE);
745 
746 		free_recv(recv1);
747 		free_req(req1);
748 		free_recv(recv2);
749 		free_req(req2);
750 		poller_reset(&poller, &group);
751 		qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
752 	}
753 
754 	/* Test 4: invalid command, check xfer type */
755 	{
756 		struct spdk_nvmf_rdma_recv *rdma_recv_inv;
757 		struct spdk_nvmf_rdma_request *rdma_req_inv;
758 		/* construct an opcode that specifies BIDIRECTIONAL transfer */
759 		uint8_t opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
760 
761 		rdma_recv_inv = create_recv(&rqpair, opc);
762 		rdma_req_inv = create_req(&rqpair, rdma_recv_inv);
763 
764 		/* NEW -> RDMA_REQUEST_STATE_COMPLETING */
765 		rqpair.current_recv_depth = 1;
766 		progress = nvmf_rdma_request_process(&rtransport, rdma_req_inv);
767 		CU_ASSERT(progress == true);
768 		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_COMPLETING);
769 		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
770 		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
771 
772 		/* RDMA_REQUEST_STATE_COMPLETED -> FREE */
773 		rdma_req_inv->state = RDMA_REQUEST_STATE_COMPLETED;
774 		nvmf_rdma_request_process(&rtransport, rdma_req_inv);
775 		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_FREE);
776 
777 		free_recv(rdma_recv_inv);
778 		free_req(rdma_req_inv);
779 		poller_reset(&poller, &group);
780 		qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
781 	}
782 
783 	spdk_mempool_free(rtransport.transport.data_buf_pool);
784 	spdk_mempool_free(rtransport.data_wr_pool);
785 }
786 
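/*
 * Verify the round-robin assignment of admin (qid 0) and I/O (qid != 0) qpairs to
 * poll groups, including the wrap-around case and the behavior as poll groups are
 * destroyed.
 */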
787 #define TEST_GROUPS_COUNT 5
788 static void
789 test_nvmf_rdma_get_optimal_poll_group(void)
790 {
791 	struct spdk_nvmf_rdma_transport rtransport = {};
792 	struct spdk_nvmf_transport *transport = &rtransport.transport;
793 	struct spdk_nvmf_rdma_qpair rqpair = {};
794 	struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
795 	struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
796 	struct spdk_nvmf_transport_poll_group *result;
797 	struct spdk_nvmf_poll_group group = {};
798 	uint32_t i;
799 
800 	rqpair.qpair.transport = transport;
801 	TAILQ_INIT(&rtransport.poll_groups);
802 
803 	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
804 		groups[i] = nvmf_rdma_poll_group_create(transport, NULL);
805 		CU_ASSERT(groups[i] != NULL);
806 		groups[i]->group = &group;
807 		rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
808 		groups[i]->transport = transport;
809 	}
810 	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[0]);
811 	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[0]);
812 
813 	/* Emulate connection of %TEST_GROUPS_COUNT% initiators - each creates 1 admin and 1 io qp */
814 	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
815 		rqpair.qpair.qid = 0;
816 		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
817 		CU_ASSERT(result == groups[i]);
818 		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
819 		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i]);
820 
821 		rqpair.qpair.qid = 1;
822 		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
823 		CU_ASSERT(result == groups[i]);
824 		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
825 		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
826 	}
827 	/* Wrap around: the next admin/io pg pointers now point to the first pg.
828 	   Destroy all poll groups except for the last one */
829 	for (i = 0; i < TEST_GROUPS_COUNT - 1; i++) {
830 		nvmf_rdma_poll_group_destroy(groups[i]);
831 		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[i + 1]);
832 		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i + 1]);
833 	}
834 
835 	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
836 	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
837 
838 	/* Check that pointers to the next admin/io poll groups are not changed */
839 	rqpair.qpair.qid = 0;
840 	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
841 	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
842 	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
843 	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
844 
845 	rqpair.qpair.qid = 1;
846 	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
847 	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
848 	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
849 	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
850 
851 	/* Remove the last poll group, check that pointers are NULL */
852 	nvmf_rdma_poll_group_destroy(groups[TEST_GROUPS_COUNT - 1]);
853 	CU_ASSERT(rtransport.conn_sched.next_admin_pg == NULL);
854 	CU_ASSERT(rtransport.conn_sched.next_io_pg == NULL);
855 
856 	/* Request optimal poll group, result must be NULL */
857 	rqpair.qpair.qid = 0;
858 	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
859 	CU_ASSERT(result == NULL);
860 
861 	rqpair.qpair.qid = 1;
862 	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
863 	CU_ASSERT(result == NULL);
864 }
865 #undef TEST_GROUPS_COUNT
866 
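/*
 * Same SGL parsing paths as above, but with DIF enabled (512-byte data blocks plus
 * 8 bytes of metadata), so the expected SGE layout depends on how io_unit_size
 * aligns with the extended block size.
 */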
867 static void
868 test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
869 {
870 	struct spdk_nvmf_rdma_transport rtransport;
871 	struct spdk_nvmf_rdma_device device;
872 	struct spdk_nvmf_rdma_request rdma_req = {};
873 	struct spdk_nvmf_rdma_recv recv;
874 	struct spdk_nvmf_rdma_poll_group group;
875 	struct spdk_nvmf_rdma_qpair rqpair;
876 	struct spdk_nvmf_rdma_poller poller;
877 	union nvmf_c2h_msg cpl;
878 	union nvmf_h2c_msg cmd;
879 	struct spdk_nvme_sgl_descriptor *sgl;
880 	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
881 	char data_buffer[8192];
882 	struct spdk_nvmf_rdma_request_data *data = (struct spdk_nvmf_rdma_request_data *)data_buffer;
883 	char data2_buffer[8192];
884 	struct spdk_nvmf_rdma_request_data *data2 = (struct spdk_nvmf_rdma_request_data *)data2_buffer;
885 	const uint32_t data_bs = 512;
886 	const uint32_t md_size = 8;
887 	int rc, i;
888 	void *aligned_buffer;
889 
890 	data->wr.sg_list = data->sgl;
891 	STAILQ_INIT(&group.group.buf_cache);
892 	group.group.buf_cache_size = 0;
893 	group.group.buf_cache_count = 0;
894 	group.group.transport = &rtransport.transport;
895 	poller.group = &group;
896 	rqpair.poller = &poller;
897 	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
898 
899 	sgl = &cmd.nvme_cmd.dptr.sgl1;
900 	rdma_req.recv = &recv;
901 	rdma_req.req.cmd = &cmd;
902 	rdma_req.req.rsp = &cpl;
903 	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
904 	rdma_req.req.qpair = &rqpair.qpair;
905 	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
906 
907 	rtransport.transport.opts = g_rdma_ut_transport_opts;
908 	rtransport.data_wr_pool = NULL;
909 	rtransport.transport.data_buf_pool = NULL;
910 
911 	device.attr.device_cap_flags = 0;
912 	device.map = NULL;
913 	sgl->keyed.key = 0xEEEE;
914 	sgl->address = 0xFFFF;
915 	rdma_req.recv->buf = (void *)0xDDDD;
916 
917 	/* Test 1: sgl type: keyed data block, subtype: address */
918 	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
919 	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
920 
921 	/* Part 1: simple I/O, one SGL smaller than the transport io unit size, block size 512 */
922 	MOCK_SET(spdk_mempool_get, (void *)0x2000);
923 	reset_nvmf_rdma_request(&rdma_req);
924 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
925 			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
926 			  0, 0, 0, 0, 0);
927 	rdma_req.req.dif_enabled = true;
928 	rtransport.transport.opts.io_unit_size = data_bs * 8;
929 	rdma_req.req.qpair->transport = &rtransport.transport;
930 	sgl->keyed.length = data_bs * 4;
931 
932 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
933 
934 	CU_ASSERT(rc == 0);
935 	CU_ASSERT(rdma_req.req.data_from_pool == true);
936 	CU_ASSERT(rdma_req.req.length == data_bs * 4);
937 	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
938 	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
939 	CU_ASSERT(rdma_req.req.iovcnt == 1);
940 	CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
941 	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
942 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
943 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
944 	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
945 
946 	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
947 	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rdma_req.req.length);
948 	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
949 
950 	/* Part 2: simple I/O, one SGL equal to io unit size, io_unit_size is not aligned with md_size,
951 		block size 512 */
952 	MOCK_SET(spdk_mempool_get, (void *)0x2000);
953 	reset_nvmf_rdma_request(&rdma_req);
954 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
955 			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
956 			  0, 0, 0, 0, 0);
957 	rdma_req.req.dif_enabled = true;
958 	rtransport.transport.opts.io_unit_size = data_bs * 4;
959 	sgl->keyed.length = data_bs * 4;
960 
961 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
962 
963 	CU_ASSERT(rc == 0);
964 	CU_ASSERT(rdma_req.req.data_from_pool == true);
965 	CU_ASSERT(rdma_req.req.length == data_bs * 4);
966 	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
967 	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
968 	CU_ASSERT(rdma_req.req.iovcnt == 2);
969 	CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
970 	CU_ASSERT(rdma_req.data.wr.num_sge == 5);
971 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
972 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
973 	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
974 
975 	for (i = 0; i < 3; ++i) {
976 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
977 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
978 		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
979 	}
980 	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
981 	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
982 	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == RDMA_UT_LKEY);
983 
984 	/* 2nd buffer consumed */
985 	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
986 	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
987 	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == RDMA_UT_LKEY);
988 
989 	/* Part 3: simple I/O, one SGL equal to the io unit size, io_unit_size is equal to block size 512 bytes */
990 	MOCK_SET(spdk_mempool_get, (void *)0x2000);
991 	reset_nvmf_rdma_request(&rdma_req);
992 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
993 			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
994 			  0, 0, 0, 0, 0);
995 	rdma_req.req.dif_enabled = true;
996 	rtransport.transport.opts.io_unit_size = data_bs;
997 	sgl->keyed.length = data_bs;
998 
999 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
1000 
1001 	CU_ASSERT(rc == 0);
1002 	CU_ASSERT(rdma_req.req.data_from_pool == true);
1003 	CU_ASSERT(rdma_req.req.length == data_bs);
1004 	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
1005 	CU_ASSERT(rdma_req.req.dif.elba_length == data_bs + md_size);
1006 	CU_ASSERT(rdma_req.req.iovcnt == 2);
1007 	CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
1008 	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
1009 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
1010 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
1011 	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
1012 
1013 	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
1014 	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs);
1015 	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
1016 
1017 	CU_ASSERT(rdma_req.req.iovcnt == 2);
1018 	CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)((unsigned long)0x2000));
1019 	CU_ASSERT(rdma_req.req.iov[0].iov_len == data_bs);
1020 	/* 2nd buffer consumed for metadata */
1021 	CU_ASSERT(rdma_req.req.iov[1].iov_base == (void *)((unsigned long)0x2000));
1022 	CU_ASSERT(rdma_req.req.iov[1].iov_len == md_size);
1023 
1024 	/* Part 4: simple I/O, one SGL equal io unit size, io_unit_size is aligned with md_size,
1025 	   block size 512 */
1026 	MOCK_SET(spdk_mempool_get, (void *)0x2000);
1027 	reset_nvmf_rdma_request(&rdma_req);
1028 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
1029 			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
1030 			  0, 0, 0, 0, 0);
1031 	rdma_req.req.dif_enabled = true;
1032 	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
1033 	sgl->keyed.length = data_bs * 4;
1034 
1035 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
1036 
1037 	CU_ASSERT(rc == 0);
1038 	CU_ASSERT(rdma_req.req.data_from_pool == true);
1039 	CU_ASSERT(rdma_req.req.length == data_bs * 4);
1040 	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
1041 	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
1042 	CU_ASSERT(rdma_req.req.iovcnt == 1);
1043 	CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
1044 	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
1045 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
1046 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
1047 	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
1048 
1049 	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
1050 	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rdma_req.req.length);
1051 	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
1052 
1053 	/* Part 5: simple I/O, one SGL equal to 2x io unit size, io_unit_size is aligned with md_size,
1054 	   block size 512 */
1055 	MOCK_SET(spdk_mempool_get, (void *)0x2000);
1056 	reset_nvmf_rdma_request(&rdma_req);
1057 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
1058 			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
1059 			  0, 0, 0, 0, 0);
1060 	rdma_req.req.dif_enabled = true;
1061 	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 2;
1062 	sgl->keyed.length = data_bs * 4;
1063 
1064 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
1065 
1066 	CU_ASSERT(rc == 0);
1067 	CU_ASSERT(rdma_req.req.data_from_pool == true);
1068 	CU_ASSERT(rdma_req.req.length == data_bs * 4);
1069 	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
1070 	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
1071 	CU_ASSERT(rdma_req.req.iovcnt == 2);
1072 	CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
1073 	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
1074 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
1075 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
1076 	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
1077 
1078 	for (i = 0; i < 2; ++i) {
1079 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
1080 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs * 2);
1081 	}
1082 
1083 	/* Part 6: simple I/O, one SGL larger than the transport io unit size, io_unit_size is not aligned to md_size,
1084 	   block size 512 */
1085 	MOCK_SET(spdk_mempool_get, (void *)0x2000);
1086 	reset_nvmf_rdma_request(&rdma_req);
1087 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
1088 			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
1089 			  0, 0, 0, 0, 0);
1090 	rdma_req.req.dif_enabled = true;
1091 	rtransport.transport.opts.io_unit_size = data_bs * 4;
1092 	sgl->keyed.length = data_bs * 6;
1093 
1094 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
1095 
1096 	CU_ASSERT(rc == 0);
1097 	CU_ASSERT(rdma_req.req.data_from_pool == true);
1098 	CU_ASSERT(rdma_req.req.length == data_bs * 6);
1099 	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
1100 	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 6);
1101 	CU_ASSERT(rdma_req.req.iovcnt == 2);
1102 	CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
1103 	CU_ASSERT(rdma_req.data.wr.num_sge == 7);
1104 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
1105 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
1106 	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
1107 
1108 	for (i = 0; i < 3; ++i) {
1109 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
1110 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
1111 		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
1112 	}
1113 	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
1114 	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
1115 	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == RDMA_UT_LKEY);
1116 
1117 	/* 2nd IO buffer consumed */
1118 	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
1119 	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
1120 	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == RDMA_UT_LKEY);
1121 
1122 	CU_ASSERT(rdma_req.data.wr.sg_list[5].addr == 0x2000 + 24 + md_size);
1123 	CU_ASSERT(rdma_req.data.wr.sg_list[5].length == 512);
1124 	CU_ASSERT(rdma_req.data.wr.sg_list[5].lkey == RDMA_UT_LKEY);
1125 
1126 	CU_ASSERT(rdma_req.data.wr.sg_list[6].addr == 0x2000 + 24 + 512 + md_size * 2);
1127 	CU_ASSERT(rdma_req.data.wr.sg_list[6].length == 512);
1128 	CU_ASSERT(rdma_req.data.wr.sg_list[6].lkey == RDMA_UT_LKEY);
1129 
1130 	/* Part 7: simple I/O, number of SGL entries exceeds the number of entries
1131 	   one WR can hold. Additional WR is chained */
1132 	MOCK_SET(spdk_mempool_get, data2_buffer);
1133 	aligned_buffer = (void *)((uintptr_t)(data2_buffer + NVMF_DATA_BUFFER_MASK) &
1134 				  ~NVMF_DATA_BUFFER_MASK);
1135 	reset_nvmf_rdma_request(&rdma_req);
1136 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
1137 			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
1138 			  0, 0, 0, 0, 0);
1139 	rdma_req.req.dif_enabled = true;
1140 	rtransport.transport.opts.io_unit_size = data_bs * 16;
1141 	sgl->keyed.length = data_bs * 16;
1142 
1143 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
1144 
1145 	CU_ASSERT(rc == 0);
1146 	CU_ASSERT(rdma_req.req.data_from_pool == true);
1147 	CU_ASSERT(rdma_req.req.length == data_bs * 16);
1148 	CU_ASSERT(rdma_req.req.iovcnt == 2);
1149 	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
1150 	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 16);
1151 	CU_ASSERT(rdma_req.req.iov[0].iov_base == aligned_buffer);
1152 	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
1153 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
1154 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
1155 
1156 	for (i = 0; i < 15; ++i) {
1157 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)aligned_buffer + i * (data_bs + md_size));
1158 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
1159 		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
1160 	}
1161 
1162 	/* 8192 - (512 + 8) * 15 = 392 */
1163 	CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)aligned_buffer + i * (data_bs + md_size));
1164 	CU_ASSERT(rdma_req.data.wr.sg_list[i].length == 392);
1165 	CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
1166 
1167 	/* additional wr from pool */
1168 	CU_ASSERT(rdma_req.data.wr.next == (void *)&data2->wr);
1169 	CU_ASSERT(rdma_req.data.wr.next->num_sge == 1);
1170 	CU_ASSERT(rdma_req.data.wr.next->next == &rdma_req.rsp.wr);
1171 	/* 2nd IO buffer */
1172 	CU_ASSERT(data2->wr.sg_list[0].addr == (uintptr_t)aligned_buffer);
1173 	CU_ASSERT(data2->wr.sg_list[0].length == 120);
1174 	CU_ASSERT(data2->wr.sg_list[0].lkey == RDMA_UT_LKEY);
1175 
1176 	/* Part 8: simple I/O, data with metadata does not fit into 1 io_buffer */
1177 	MOCK_SET(spdk_mempool_get, (void *)0x2000);
1178 	reset_nvmf_rdma_request(&rdma_req);
1179 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
1180 			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
1181 			  0, 0, 0, 0, 0);
1182 	rdma_req.req.dif_enabled = true;
1183 	rtransport.transport.opts.io_unit_size = 516;
1184 	sgl->keyed.length = data_bs * 2;
1185 
1186 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
1187 
1188 	CU_ASSERT(rc == 0);
1189 	CU_ASSERT(rdma_req.req.data_from_pool == true);
1190 	CU_ASSERT(rdma_req.req.length == data_bs * 2);
1191 	CU_ASSERT(rdma_req.req.iovcnt == 3);
1192 	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
1193 	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 2);
1194 	CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)0x2000);
1195 	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
1196 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
1197 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
1198 
1199 	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
1200 	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 512);
1201 	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
1202 
1203 	/* 2nd IO buffer consumed, offset by 4 bytes because part of the metadata
1204 	   is located at the beginning of that buffer */
1205 	CU_ASSERT(rdma_req.data.wr.sg_list[1].addr == 0x2000 + 4);
1206 	CU_ASSERT(rdma_req.data.wr.sg_list[1].length == 512);
1207 	CU_ASSERT(rdma_req.data.wr.sg_list[1].lkey == RDMA_UT_LKEY);
1208 
1209 	/* Test 2: Multi SGL */
1210 	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
1211 	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
1212 	sgl->address = 0;
1213 	rdma_req.recv->buf = (void *)&sgl_desc;
1214 	MOCK_SET(spdk_mempool_get, data_buffer);
1215 	aligned_buffer = (void *)((uintptr_t)(data_buffer + NVMF_DATA_BUFFER_MASK) &
1216 				  ~NVMF_DATA_BUFFER_MASK);
1217 
1218 	/* part 1: 2 segments each with 1 wr. io_unit_size is aligned with data_bs + md_size */
1219 	reset_nvmf_rdma_request(&rdma_req);
1220 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
1221 			  SPDK_DIF_TYPE1,
1222 			  SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK, 0, 0, 0, 0, 0);
1223 	rdma_req.req.dif_enabled = true;
1224 	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
1225 	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
1226 
1227 	for (i = 0; i < 2; i++) {
1228 		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
1229 		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
1230 		sgl_desc[i].keyed.length = data_bs * 4;
1231 		sgl_desc[i].address = 0x4000 + i * data_bs * 4;
1232 		sgl_desc[i].keyed.key = 0x44;
1233 	}
1234 
1235 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
1236 
1237 	CU_ASSERT(rc == 0);
1238 	CU_ASSERT(rdma_req.req.data_from_pool == true);
1239 	CU_ASSERT(rdma_req.req.length == data_bs * 4 * 2);
1240 	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
1241 	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4 * 2);
1242 	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
1243 	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == (uintptr_t)(aligned_buffer));
1244 	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs * 4);
1245 
1246 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
1247 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
1248 	CU_ASSERT(rdma_req.data.wr.next == &data->wr);
1249 	CU_ASSERT(data->wr.wr.rdma.rkey == 0x44);
1250 	CU_ASSERT(data->wr.wr.rdma.remote_addr == 0x4000 + data_bs * 4);
1251 	CU_ASSERT(data->wr.num_sge == 1);
1252 	CU_ASSERT(data->wr.sg_list[0].addr == (uintptr_t)(aligned_buffer));
1253 	CU_ASSERT(data->wr.sg_list[0].length == data_bs * 4);
1254 
1255 	CU_ASSERT(data->wr.next == &rdma_req.rsp.wr);
1256 	reset_nvmf_rdma_request(&rdma_req);
1257 }
1258 
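/* nvmf_rdma_opts_init() should populate every option with the RDMA transport defaults. */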
1259 static void
1260 test_nvmf_rdma_opts_init(void)
1261 {
1262 	struct spdk_nvmf_transport_opts	opts = {};
1263 
1264 	nvmf_rdma_opts_init(&opts);
1265 	CU_ASSERT(opts.max_queue_depth == SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH);
1266 	CU_ASSERT(opts.max_qpairs_per_ctrlr ==	SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR);
1267 	CU_ASSERT(opts.in_capsule_data_size ==	SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE);
1268 	CU_ASSERT(opts.max_io_size == SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE);
1269 	CU_ASSERT(opts.io_unit_size == SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE);
1270 	CU_ASSERT(opts.max_aq_depth == SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH);
1271 	CU_ASSERT(opts.num_shared_buffers == SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS);
1272 	CU_ASSERT(opts.buf_cache_size == SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE);
1273 	CU_ASSERT(opts.dif_insert_or_strip == SPDK_NVMF_RDMA_DIF_INSERT_OR_STRIP);
1274 	CU_ASSERT(opts.abort_timeout_sec == SPDK_NVMF_RDMA_DEFAULT_ABORT_TIMEOUT_SEC);
1275 	CU_ASSERT(opts.transport_specific == NULL);
1276 }
1277 
1278 static void
1279 test_nvmf_rdma_request_free_data(void)
1280 {
1281 	struct spdk_nvmf_rdma_request rdma_req = {};
1282 	struct spdk_nvmf_rdma_transport rtransport = {};
1283 	struct spdk_nvmf_rdma_request_data *next_request_data = NULL;
1284 
1285 	MOCK_CLEAR(spdk_mempool_get);
1286 	rtransport.data_wr_pool = spdk_mempool_create("spdk_nvmf_rdma_wr_data",
1287 				  SPDK_NVMF_MAX_SGL_ENTRIES,
1288 				  sizeof(struct spdk_nvmf_rdma_request_data),
1289 				  SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
1290 				  SPDK_ENV_SOCKET_ID_ANY);
1291 	next_request_data = spdk_mempool_get(rtransport.data_wr_pool);
	SPDK_CU_ASSERT_FATAL(next_request_data != NULL);
1292 	SPDK_CU_ASSERT_FATAL(((struct test_mempool *)rtransport.data_wr_pool)->count ==
1293 			     SPDK_NVMF_MAX_SGL_ENTRIES - 1);
1294 	next_request_data->wr.wr_id = 1;
1295 	next_request_data->wr.num_sge = 2;
1296 	next_request_data->wr.next = NULL;
1297 	rdma_req.data.wr.next = &next_request_data->wr;
1298 	rdma_req.data.wr.wr_id = 1;
1299 	rdma_req.data.wr.num_sge = 2;
1300 
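	/*
	 * Chaining data.wr to the element taken from the pool mimics a request that
	 * needed an extra data WR; freeing the request should hand that element back
	 * and clear the request's own num_sge.
	 */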
1301 	nvmf_rdma_request_free_data(&rdma_req, &rtransport);
1302 	/* Check that next_request_data was returned to the memory pool */
1303 	CU_ASSERT(((struct test_mempool *)rtransport.data_wr_pool)->count == SPDK_NVMF_MAX_SGL_ENTRIES);
1304 	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
1305 
1306 	spdk_mempool_free(rtransport.data_wr_pool);
1307 }
1308 
1309 static void
1310 test_nvmf_rdma_update_ibv_state(void)
1311 {
1312 	struct spdk_nvmf_rdma_qpair rqpair = {};
1313 	struct spdk_rdma_qp rdma_qp = {};
1314 	struct ibv_qp qp = {};
1315 	int rc = 0;
1316 
1317 	rqpair.rdma_qp = &rdma_qp;
1318 
1319 	/* Case 1: Failed to get updated RDMA queue pair state */
1320 	rqpair.ibv_state = IBV_QPS_INIT;
1321 	rqpair.rdma_qp->qp = NULL;
1322 
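	/*
	 * With no underlying ibv_qp the state query cannot succeed, so the helper
	 * reports IBV_QPS_ERR + 1 instead of a real state.
	 */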
1323 	rc = nvmf_rdma_update_ibv_state(&rqpair);
1324 	CU_ASSERT(rc == IBV_QPS_ERR + 1);
1325 
1326 	/* Case 2: Bad state updated */
1327 	rqpair.rdma_qp->qp = &qp;
1328 	qp.state = IBV_QPS_ERR;
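	/*
	 * The expected value 10 is simply the out-of-range state this file's
	 * ibv_query_qp stub reports for a QP in IBV_QPS_ERR: the helper records it
	 * but still returns IBV_QPS_ERR + 1 to flag it as invalid.
	 */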
1329 	rc = nvmf_rdma_update_ibv_state(&rqpair);
1330 	CU_ASSERT(rqpair.ibv_state == 10);
1331 	CU_ASSERT(rc == IBV_QPS_ERR + 1);
1332 
1333 	/* Case 3: Pass */
1334 	qp.state = IBV_QPS_INIT;
1335 	rc = nvmf_rdma_update_ibv_state(&rqpair);
1336 	CU_ASSERT(rqpair.ibv_state == IBV_QPS_INIT);
1337 	CU_ASSERT(rc == IBV_QPS_INIT);
1338 }
1339 
1340 static void
1341 test_nvmf_rdma_resources_create(void)
1342 {
1343 	static struct spdk_nvmf_rdma_resources *rdma_resource;
1344 	struct spdk_nvmf_rdma_resource_opts opts = {};
1345 	struct spdk_nvmf_rdma_qpair qpair = {};
1346 	struct spdk_nvmf_rdma_recv *recv = NULL;
1347 	struct spdk_nvmf_rdma_request *req = NULL;
1348 	const int DEPTH = 128;
1349 
1350 	opts.max_queue_depth = DEPTH;
1351 	opts.in_capsule_data_size = 4096;
1352 	opts.shared = true;
1353 	opts.qpair = &qpair;
1354 
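	/*
	 * With in_capsule_data_size = 4096, each of the DEPTH slots should get a recv
	 * carrying two SGEs (the command plus a 4 KiB slice of bufs) and a request
	 * wired up with its response and data WRs.
	 */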
1355 	rdma_resource = nvmf_rdma_resources_create(&opts);
1356 	CU_ASSERT(rdma_resource != NULL);
1357 	/* Just check the first and last entries */
1358 	recv = &rdma_resource->recvs[0];
1359 	req = &rdma_resource->reqs[0];
1360 	CU_ASSERT(recv->rdma_wr.type == RDMA_WR_TYPE_RECV);
1361 	CU_ASSERT((uintptr_t)recv->buf == (uintptr_t)(rdma_resource->bufs));
1362 	CU_ASSERT(recv->sgl[0].addr == (uintptr_t)&rdma_resource->cmds[0]);
1363 	CU_ASSERT(recv->sgl[0].length == sizeof(rdma_resource->cmds[0]));
1364 	CU_ASSERT(recv->sgl[0].lkey == RDMA_UT_LKEY);
1365 	CU_ASSERT(recv->wr.num_sge == 2);
1366 	CU_ASSERT(recv->wr.wr_id == (uintptr_t)&rdma_resource->recvs[0].rdma_wr);
1367 	CU_ASSERT(recv->wr.sg_list == rdma_resource->recvs[0].sgl);
1368 	CU_ASSERT(req->req.rsp == &rdma_resource->cpls[0]);
1369 	CU_ASSERT(req->rsp.sgl[0].addr == (uintptr_t)&rdma_resource->cpls[0]);
1370 	CU_ASSERT(req->rsp.sgl[0].length == sizeof(rdma_resource->cpls[0]));
1371 	CU_ASSERT(req->rsp.sgl[0].lkey == RDMA_UT_LKEY);
1372 	CU_ASSERT(req->rsp.rdma_wr.type == RDMA_WR_TYPE_SEND);
1373 	CU_ASSERT(req->rsp.wr.wr_id == (uintptr_t)&rdma_resource->reqs[0].rsp.rdma_wr);
1374 	CU_ASSERT(req->rsp.wr.next == NULL);
1375 	CU_ASSERT(req->rsp.wr.opcode == IBV_WR_SEND);
1376 	CU_ASSERT(req->rsp.wr.send_flags == IBV_SEND_SIGNALED);
1377 	CU_ASSERT(req->rsp.wr.sg_list == rdma_resource->reqs[0].rsp.sgl);
1378 	CU_ASSERT(req->rsp.wr.num_sge == NVMF_DEFAULT_RSP_SGE);
1379 	CU_ASSERT(req->data.rdma_wr.type == RDMA_WR_TYPE_DATA);
1380 	CU_ASSERT(req->data.wr.wr_id == (uintptr_t)&rdma_resource->reqs[0].data.rdma_wr);
1381 	CU_ASSERT(req->data.wr.next == NULL);
1382 	CU_ASSERT(req->data.wr.send_flags == IBV_SEND_SIGNALED);
1383 	CU_ASSERT(req->data.wr.sg_list == rdma_resource->reqs[0].data.sgl);
1384 	CU_ASSERT(req->data.wr.num_sge == SPDK_NVMF_MAX_SGL_ENTRIES);
1385 	CU_ASSERT(req->state == RDMA_REQUEST_STATE_FREE);
1386 
1387 	recv = &rdma_resource->recvs[DEPTH - 1];
1388 	req = &rdma_resource->reqs[DEPTH - 1];
1389 	CU_ASSERT(recv->rdma_wr.type == RDMA_WR_TYPE_RECV);
1390 	CU_ASSERT((uintptr_t)recv->buf == (uintptr_t)(rdma_resource->bufs +
1391 			(DEPTH - 1) * 4096));
1392 	CU_ASSERT(recv->sgl[0].addr == (uintptr_t)&rdma_resource->cmds[DEPTH - 1]);
1393 	CU_ASSERT(recv->sgl[0].length == sizeof(rdma_resource->cmds[DEPTH - 1]));
1394 	CU_ASSERT(recv->sgl[0].lkey == RDMA_UT_LKEY);
1395 	CU_ASSERT(recv->wr.num_sge == 2);
1396 	CU_ASSERT(recv->wr.wr_id == (uintptr_t)&rdma_resource->recvs[DEPTH - 1].rdma_wr);
1397 	CU_ASSERT(recv->wr.sg_list == rdma_resource->recvs[DEPTH - 1].sgl);
1398 	CU_ASSERT(req->req.rsp == &rdma_resource->cpls[DEPTH - 1]);
1399 	CU_ASSERT(req->rsp.sgl[0].addr == (uintptr_t)&rdma_resource->cpls[DEPTH - 1]);
1400 	CU_ASSERT(req->rsp.sgl[0].length == sizeof(rdma_resource->cpls[DEPTH - 1]));
1401 	CU_ASSERT(req->rsp.sgl[0].lkey == RDMA_UT_LKEY);
1402 	CU_ASSERT(req->rsp.rdma_wr.type == RDMA_WR_TYPE_SEND);
1403 	CU_ASSERT(req->rsp.wr.wr_id == (uintptr_t)
1404 		  &req->rsp.rdma_wr);
1405 	CU_ASSERT(req->rsp.wr.next == NULL);
1406 	CU_ASSERT(req->rsp.wr.opcode == IBV_WR_SEND);
1407 	CU_ASSERT(req->rsp.wr.send_flags == IBV_SEND_SIGNALED);
1408 	CU_ASSERT(req->rsp.wr.sg_list == rdma_resource->reqs[DEPTH - 1].rsp.sgl);
1409 	CU_ASSERT(req->rsp.wr.num_sge == NVMF_DEFAULT_RSP_SGE);
1410 	CU_ASSERT(req->data.rdma_wr.type == RDMA_WR_TYPE_DATA);
1411 	CU_ASSERT(req->data.wr.wr_id == (uintptr_t)
1412 		  &req->data.rdma_wr);
1413 	CU_ASSERT(req->data.wr.next == NULL);
1414 	CU_ASSERT(req->data.wr.send_flags == IBV_SEND_SIGNALED);
1415 	CU_ASSERT(req->data.wr.sg_list == rdma_resource->reqs[DEPTH - 1].data.sgl);
1416 	CU_ASSERT(req->data.wr.num_sge == SPDK_NVMF_MAX_SGL_ENTRIES);
1417 	CU_ASSERT(req->state == RDMA_REQUEST_STATE_FREE);
1418 
1419 	nvmf_rdma_resources_destroy(rdma_resource);
1420 }
1421 
1422 static void
1423 test_nvmf_rdma_qpair_compare(void)
1424 {
1425 	struct spdk_nvmf_rdma_qpair rqpair1 = {}, rqpair2 = {};
1426 
1427 	rqpair1.qp_num = 0;
1428 	rqpair2.qp_num = UINT32_MAX;
1429 
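	/*
	 * 0 vs. UINT32_MAX is presumably chosen so that a comparator which naively
	 * subtracts and truncates to int would get the sign wrong; the ordering must
	 * hold in both directions.
	 */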
1430 	CU_ASSERT(nvmf_rdma_qpair_compare(&rqpair1, &rqpair2) < 0);
1431 	CU_ASSERT(nvmf_rdma_qpair_compare(&rqpair2, &rqpair1) > 0);
1432 }
1433 
1434 static void
1435 test_nvmf_rdma_resize_cq(void)
1436 {
1437 	int rc = -1;
1438 	int tnum_wr = 0;
1439 	int tnum_cqe = 0;
1440 	struct spdk_nvmf_rdma_qpair rqpair = {};
1441 	struct spdk_nvmf_rdma_poller rpoller = {};
1442 	struct spdk_nvmf_rdma_device rdevice = {};
1443 	struct ibv_context ircontext = {};
1444 	struct ibv_device idevice = {};
1445 
1446 	rdevice.context = &ircontext;
1447 	rqpair.poller = &rpoller;
1448 	ircontext.device = &idevice;
1449 
1450 	/* Test1: Current capacity already supports the required size. */
1451 	rpoller.required_num_wr = 10;
1452 	rpoller.num_cqe = 20;
1453 	rqpair.max_queue_depth = 2;
1454 	tnum_wr = rpoller.required_num_wr;
1455 	tnum_cqe = rpoller.num_cqe;
1456 
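	/*
	 * 10 + MAX_WR_PER_QP(2) still fits within the current num_cqe of 20, so only
	 * the WR accounting grows and no CQ resize is attempted.
	 */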
1457 	rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
1458 	CU_ASSERT(rc == 0);
1459 	CU_ASSERT(rpoller.required_num_wr == 10 + MAX_WR_PER_QP(rqpair.max_queue_depth));
1460 	CU_ASSERT(rpoller.required_num_wr > tnum_wr);
1461 	CU_ASSERT(rpoller.num_cqe == tnum_cqe);
1462 
1463 	/* Test2: iWARP doesn't support CQ resize. */
1464 	tnum_wr = rpoller.required_num_wr;
1465 	tnum_cqe = rpoller.num_cqe;
1466 	idevice.transport_type = IBV_TRANSPORT_IWARP;
1467 
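	/*
	 * This call would need to grow the CQ, but iWARP devices cannot resize one,
	 * so it fails and both counters stay untouched.
	 */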
1468 	rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
1469 	CU_ASSERT(rc == -1);
1470 	CU_ASSERT(rpoller.required_num_wr == tnum_wr);
1471 	CU_ASSERT(rpoller.num_cqe == tnum_cqe);
1472 
1474 	/* Test3: RDMA CQE requirement exceeds the device's max_cqe limit. */
1475 	tnum_wr = rpoller.required_num_wr;
1476 	tnum_cqe = rpoller.num_cqe;
1477 	idevice.transport_type = IBV_TRANSPORT_UNKNOWN;
1478 	rdevice.attr.max_cqe = 3;
1479 
1480 	rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
1481 	CU_ASSERT(rc == -1);
1482 	CU_ASSERT(rpoller.required_num_wr == tnum_wr);
1483 	CU_ASSERT(rpoller.num_cqe == tnum_cqe);
1484 
1485 	/* Test4: RDMA CQ resize failed. */
1486 	tnum_wr = rpoller.required_num_wr;
1487 	tnum_cqe = rpoller.num_cqe;
1488 	idevice.transport_type = IBV_TRANSPORT_IB;
1489 	rdevice.attr.max_cqe = 30;
1490 	MOCK_SET(ibv_resize_cq, -1);
1491 
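	/*
	 * max_cqe is now large enough and the transport allows resizing, but the
	 * mocked ibv_resize_cq failure must leave both counters rolled back.
	 */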
1492 	rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
1493 	CU_ASSERT(rc == -1);
1494 	CU_ASSERT(rpoller.required_num_wr == tnum_wr);
1495 	CU_ASSERT(rpoller.num_cqe == tnum_cqe);
1496 
1497 	/* Test5: RDMA CQ resize success. rsize = MIN(MAX(num_cqe * 2, required_num_wr), device->attr.max_cqe). */
1498 	tnum_wr = rpoller.required_num_wr;
1499 	tnum_cqe = rpoller.num_cqe;
1500 	MOCK_SET(ibv_resize_cq, 0);
1501 
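	/*
	 * The WR requirement now exceeds num_cqe (20), so a resize is attempted and
	 * the CQ grows to MIN(MAX(20 * 2, required_num_wr), max_cqe = 30) = 30.
	 */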
1502 	rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
1503 	CU_ASSERT(rc == 0);
1504 	CU_ASSERT(rpoller.num_cqe == 30);
1505 	CU_ASSERT(rpoller.required_num_wr == 18 + MAX_WR_PER_QP(rqpair.max_queue_depth));
1506 	CU_ASSERT(rpoller.required_num_wr > tnum_wr);
1507 	CU_ASSERT(rpoller.num_cqe > tnum_cqe);
1508 }
1509 
1510 int
1511 main(int argc, char **argv)
1512 {
1513 	CU_pSuite	suite = NULL;
1514 	unsigned int	num_failures;
1515 
1516 	CU_set_error_action(CUEA_ABORT);
1517 	CU_initialize_registry();
1518 
1519 	suite = CU_add_suite("nvmf", NULL, NULL);
1520 
1521 	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl);
1522 	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_process);
1523 	CU_ADD_TEST(suite, test_nvmf_rdma_get_optimal_poll_group);
1524 	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl_with_md);
1525 	CU_ADD_TEST(suite, test_nvmf_rdma_opts_init);
1526 	CU_ADD_TEST(suite, test_nvmf_rdma_request_free_data);
1527 	CU_ADD_TEST(suite, test_nvmf_rdma_update_ibv_state);
1528 	CU_ADD_TEST(suite, test_nvmf_rdma_resources_create);
1529 	CU_ADD_TEST(suite, test_nvmf_rdma_qpair_compare);
1530 	CU_ADD_TEST(suite, test_nvmf_rdma_resize_cq);
1531 
1532 	CU_basic_set_mode(CU_BRM_VERBOSE);
1533 	CU_basic_run_tests();
1534 	num_failures = CU_get_number_of_failures();
1535 	CU_cleanup_registry();
1536 	return num_failures;
1537 }
1538