xref: /spdk/test/unit/lib/nvmf/rdma.c/rdma_ut.c (revision 877573897ad52be4fa8989f7617bd655b87e05c4)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2018 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2019, 2021 Mellanox Technologies LTD. All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 #include "spdk_cunit.h"
8 #include "common/lib/test_env.c"
9 #include "common/lib/test_rdma.c"
10 #include "nvmf/rdma.c"
11 #include "nvmf/transport.c"
12 
13 #define RDMA_UT_UNITS_IN_MAX_IO 16
14 
15 struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
16 	.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
17 	.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
18 	.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
19 	.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
20 	.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
21 	.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
22 	.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
23 };
24 
25 SPDK_LOG_REGISTER_COMPONENT(nvmf)
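/* The stubs below satisfy symbols that are pulled in by compiling nvmf/rdma.c and
 * nvmf/transport.c directly into this test but are out of scope for these cases. */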
26 DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
27 		uint64_t size, uint64_t translation), 0);
28 DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
29 		uint64_t size), 0);
30 DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
31 		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
32 DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
33 		nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
34 DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int,
35 	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid), 0);
36 DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));
37 
38 DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
39 		struct spdk_nvmf_ctrlr_data *cdata));
40 DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
41 DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
42 DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
43 		const struct spdk_nvme_transport_id *trid2), 0);
44 DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
45 DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
46 		struct spdk_dif_ctx *dif_ctx), false);
47 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
48 		enum spdk_nvme_transport_type trtype));
49 DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
50 DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);
51 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
52 DEFINE_STUB(ibv_dereg_mr, int, (struct ibv_mr *mr), 0);
53 DEFINE_STUB(ibv_resize_cq, int, (struct ibv_cq *cq, int cqe), 0);
54 DEFINE_STUB(spdk_mempool_lookup, struct spdk_mempool *, (const char *name), NULL);
55 
56 /* ibv_reg_mr can be a macro, so we need to undefine it before defining the mock */
57 #ifdef ibv_reg_mr
58 #undef ibv_reg_mr
59 #endif
60 
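/* Mocked ibv_reg_mr: unless a return value is forced with MOCK_SET, any registration
 * with a non-zero length resolves to the shared mock MR (g_rdma_mr) provided by the
 * common RDMA test code included above. */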
61 DEFINE_RETURN_MOCK(ibv_reg_mr, struct ibv_mr *);
62 struct ibv_mr *
63 ibv_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
64 {
65 	HANDLE_RETURN_MOCK(ibv_reg_mr);
66 	if (length > 0) {
67 		return &g_rdma_mr;
68 	} else {
69 		return NULL;
70 	}
71 }
72 
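/* Mocked ibv_query_qp: a qpair in IBV_QPS_ERR is reported as state 10, an
 * out-of-range value, so that callers' bad-state handling can be exercised
 * (see test_nvmf_rdma_update_ibv_state below). */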
73 int
74 ibv_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
75 	     int attr_mask, struct ibv_qp_init_attr *init_attr)
76 {
77 	if (qp == NULL) {
78 		return -1;
79 	} else {
80 		attr->port_num = 80;
81 
82 		if (qp->state == IBV_QPS_ERR) {
83 			attr->qp_state = 10;
84 		} else {
85 			attr->qp_state = IBV_QPS_INIT;
86 		}
87 
88 		return 0;
89 	}
90 }
91 
92 const char *
93 spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
94 {
95 	switch (trtype) {
96 	case SPDK_NVME_TRANSPORT_PCIE:
97 		return "PCIe";
98 	case SPDK_NVME_TRANSPORT_RDMA:
99 		return "RDMA";
100 	case SPDK_NVME_TRANSPORT_FC:
101 		return "FC";
102 	default:
103 		return NULL;
104 	}
105 }
106 
107 int
108 spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
109 {
110 	int len, i;
111 
112 	if (trstring == NULL) {
113 		return -EINVAL;
114 	}
115 
116 	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
117 	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
118 		return -EINVAL;
119 	}
120 
121 	/* Store the official trstring as the uppercase version of the input. */
122 	for (i = 0; i < len; i++) {
123 		trid->trstring[i] = toupper(trstring[i]);
124 	}
125 	return 0;
126 }
127 
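/* Reset the fields that nvmf_rdma_request_parse_sgl() fills in, so a single
 * request object can be reused across test cases. */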
128 static void
129 reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
130 {
131 	int i;
132 
133 	rdma_req->req.length = 0;
134 	rdma_req->req.data_from_pool = false;
135 	rdma_req->req.data = NULL;
136 	rdma_req->data.wr.num_sge = 0;
137 	rdma_req->data.wr.wr.rdma.remote_addr = 0;
138 	rdma_req->data.wr.wr.rdma.rkey = 0;
139 	rdma_req->offset = 0;
140 	memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));
141 
142 	for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
143 		rdma_req->req.iov[i].iov_base = 0;
144 		rdma_req->req.iov[i].iov_len = 0;
145 		rdma_req->req.buffers[i] = 0;
146 		rdma_req->data.wr.sg_list[i].addr = 0;
147 		rdma_req->data.wr.sg_list[i].length = 0;
148 		rdma_req->data.wr.sg_list[i].lkey = 0;
149 	}
150 	rdma_req->req.iovcnt = 0;
151 	if (rdma_req->req.stripped_data) {
152 		free(rdma_req->req.stripped_data);
153 		rdma_req->req.stripped_data = NULL;
154 	}
155 }
156 
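/* Exercises nvmf_rdma_request_parse_sgl() for keyed SGLs, in-capsule data,
 * multi-SGL segments and the poll group buffer cache. */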
157 static void
158 test_spdk_nvmf_rdma_request_parse_sgl(void)
159 {
160 	struct spdk_nvmf_rdma_transport rtransport;
161 	struct spdk_nvmf_rdma_device device;
162 	struct spdk_nvmf_rdma_request rdma_req = {};
163 	struct spdk_nvmf_rdma_recv recv;
164 	struct spdk_nvmf_rdma_poll_group group;
165 	struct spdk_nvmf_rdma_qpair rqpair;
166 	struct spdk_nvmf_rdma_poller poller;
167 	union nvmf_c2h_msg cpl;
168 	union nvmf_h2c_msg cmd;
169 	struct spdk_nvme_sgl_descriptor *sgl;
170 	struct spdk_nvmf_transport_pg_cache_buf bufs[4];
171 	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
172 	struct spdk_nvmf_rdma_request_data data;
173 	int rc, i;
174 	uint32_t sgl_length;
175 	uintptr_t aligned_buffer_address;
176 
177 	data.wr.sg_list = data.sgl;
178 	STAILQ_INIT(&group.group.buf_cache);
179 	group.group.buf_cache_size = 0;
180 	group.group.buf_cache_count = 0;
181 	group.group.transport = &rtransport.transport;
182 	poller.group = &group;
183 	rqpair.poller = &poller;
184 	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
185 
186 	sgl = &cmd.nvme_cmd.dptr.sgl1;
187 	rdma_req.recv = &recv;
188 	rdma_req.req.cmd = &cmd;
189 	rdma_req.req.rsp = &cpl;
190 	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
191 	rdma_req.req.qpair = &rqpair.qpair;
192 	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
193 
194 	rtransport.transport.opts = g_rdma_ut_transport_opts;
195 	rtransport.data_wr_pool = NULL;
196 	rtransport.transport.data_buf_pool = NULL;
197 
198 	device.attr.device_cap_flags = 0;
199 	sgl->keyed.key = 0xEEEE;
200 	sgl->address = 0xFFFF;
201 	rdma_req.recv->buf = (void *)0xDDDD;
202 
203 	/* Test 1: sgl type: keyed data block subtype: address */
204 	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
205 	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
206 
207 	/* Part 1: simple I/O, one SGL smaller than the transport io unit size */
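	/* Make every data buffer allocation resolve to the fake address 0x2000 */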
208 	MOCK_SET(spdk_mempool_get, (void *)0x2000);
209 	reset_nvmf_rdma_request(&rdma_req);
210 	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;
211 
212 	device.map = (void *)0x0;
213 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
214 	CU_ASSERT(rc == 0);
215 	CU_ASSERT(rdma_req.req.data_from_pool == true);
216 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
217 	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
218 	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
219 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
220 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
221 	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
222 	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
223 	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
224 	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
225 
226 	/* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
227 	reset_nvmf_rdma_request(&rdma_req);
228 	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
229 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
230 
231 	CU_ASSERT(rc == 0);
232 	CU_ASSERT(rdma_req.req.data_from_pool == true);
233 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
234 	CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
235 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
236 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
237 	for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
238 		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
239 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
240 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
241 		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
242 	}
243 
244 	/* Part 3: simple I/O, one SGL larger than the transport max io size */
245 	reset_nvmf_rdma_request(&rdma_req);
246 	sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
247 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
248 
249 	CU_ASSERT(rc == -1);
250 
251 	/* Part 4: Pretend there are no buffer pools */
252 	MOCK_SET(spdk_mempool_get, NULL);
253 	reset_nvmf_rdma_request(&rdma_req);
254 	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
255 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
256 
257 	CU_ASSERT(rc == 0);
258 	CU_ASSERT(rdma_req.req.data_from_pool == false);
259 	CU_ASSERT(rdma_req.req.data == NULL);
260 	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
261 	CU_ASSERT(rdma_req.req.buffers[0] == NULL);
262 	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
263 	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
264 	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);
265 
266 	rdma_req.recv->buf = (void *)0xDDDD;
267 	/* Test 2: sgl type: data block subtype: offset (in capsule data) */
268 	sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
269 	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
270 
271 	/* Part 1: Normal I/O that fits within the in capsule data size, no offset */
272 	reset_nvmf_rdma_request(&rdma_req);
273 	sgl->address = 0;
274 	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
275 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
276 
277 	CU_ASSERT(rc == 0);
278 	CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
279 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
280 	CU_ASSERT(rdma_req.req.data_from_pool == false);
281 
282 	/* Part 2: I/O offset + length too large */
283 	reset_nvmf_rdma_request(&rdma_req);
284 	sgl->address = rtransport.transport.opts.in_capsule_data_size;
285 	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
286 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
287 
288 	CU_ASSERT(rc == -1);
289 
290 	/* Part 3: I/O too large */
291 	reset_nvmf_rdma_request(&rdma_req);
292 	sgl->address = 0;
293 	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
294 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
295 
296 	CU_ASSERT(rc == -1);
297 
298 	/* Test 3: Multi SGL */
299 	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
300 	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
301 	sgl->address = 0;
302 	rdma_req.recv->buf = (void *)&sgl_desc;
303 	MOCK_SET(spdk_mempool_get, &data);
304 
305 	/* part 1: 2 segments each with 1 wr. */
306 	reset_nvmf_rdma_request(&rdma_req);
307 	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
308 	for (i = 0; i < 2; i++) {
309 		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
310 		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
311 		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
312 		sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
313 		sgl_desc[i].keyed.key = 0x44;
314 	}
315 
316 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
317 
318 	CU_ASSERT(rc == 0);
319 	CU_ASSERT(rdma_req.req.data_from_pool == true);
320 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
321 	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
322 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
323 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
324 	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
325 	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
326 	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
327 	CU_ASSERT(data.wr.num_sge == 1);
328 	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
329 
330 	/* part 2: 2 segments, each with 1 wr containing 8 SGE elements */
331 	reset_nvmf_rdma_request(&rdma_req);
332 	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
333 	for (i = 0; i < 2; i++) {
334 		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
335 		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
336 		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
337 		sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
338 		sgl_desc[i].keyed.key = 0x44;
339 	}
340 
341 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
342 
343 	CU_ASSERT(rc == 0);
344 	CU_ASSERT(rdma_req.req.data_from_pool == true);
345 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
346 	CU_ASSERT(rdma_req.req.iovcnt == 16);
347 	CU_ASSERT(rdma_req.data.wr.num_sge == 8);
348 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
349 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
350 	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
351 	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
352 	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
353 	CU_ASSERT(data.wr.num_sge == 8);
354 	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
355 
356 	/* part 3: 2 segments, one very large, one very small */
357 	reset_nvmf_rdma_request(&rdma_req);
358 	for (i = 0; i < 2; i++) {
359 		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
360 		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
361 		sgl_desc[i].keyed.key = 0x44;
362 	}
363 
364 	sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
365 				   rtransport.transport.opts.io_unit_size / 2;
366 	sgl_desc[0].address = 0x4000;
367 	sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
368 	sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
369 			      rtransport.transport.opts.io_unit_size / 2;
370 
371 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
372 
373 	CU_ASSERT(rc == 0);
374 	CU_ASSERT(rdma_req.req.data_from_pool == true);
375 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
376 	CU_ASSERT(rdma_req.req.iovcnt == 16);
377 	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
378 	for (i = 0; i < 15; i++) {
379 		CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
380 	}
381 	CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
382 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
383 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
384 	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
385 	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
386 	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
387 		  rtransport.transport.opts.io_unit_size / 2);
388 	CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
389 	CU_ASSERT(data.wr.num_sge == 1);
390 	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
391 
392 	/* part 4: 2 SGL descriptors, each with a length of half a transport buffer;
393 	 * only 1 transport buffer should be allocated */
394 	reset_nvmf_rdma_request(&rdma_req);
395 	aligned_buffer_address = ((uintptr_t)(&data) + NVMF_DATA_BUFFER_MASK) & ~NVMF_DATA_BUFFER_MASK;
396 	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
397 	sgl_length = rtransport.transport.opts.io_unit_size / 2;
398 	for (i = 0; i < 2; i++) {
399 		sgl_desc[i].keyed.length = sgl_length;
400 		sgl_desc[i].address = 0x4000 + i * sgl_length;
401 	}
402 
403 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
404 
405 	CU_ASSERT(rc == 0);
406 	CU_ASSERT(rdma_req.req.data_from_pool == true);
407 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size);
408 	CU_ASSERT(rdma_req.req.iovcnt == 1);
409 
410 	CU_ASSERT(rdma_req.data.sgl[0].length == sgl_length);
411 	/* We mocked spdk_mempool_get to return the address of the data variable. The mempool supplies
412 	 * both additional WRs and data buffers, so the data buffer is carved out of &data, aligned up */
413 	CU_ASSERT(rdma_req.data.sgl[0].addr == aligned_buffer_address);
414 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
415 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
416 	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
417 	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
418 
419 	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
420 	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + sgl_length);
421 	CU_ASSERT(data.sgl[0].length == sgl_length);
422 	CU_ASSERT(data.sgl[0].addr == aligned_buffer_address + sgl_length);
423 	CU_ASSERT(data.wr.num_sge == 1);
424 
425 	/* Test 4: use PG buffer cache */
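	/* Buffers pre-loaded into the poll group cache below must be consumed before
	 * the request falls back to the mocked mempool. */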
426 	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
427 	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
428 	sgl->address = 0xFFFF;
429 	rdma_req.recv->buf = (void *)0xDDDD;
430 	sgl->keyed.key = 0xEEEE;
431 
432 	for (i = 0; i < 4; i++) {
433 		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
434 	}
435 
436 	/* part 1: use the four buffers from the pg cache */
437 	group.group.buf_cache_size = 4;
438 	group.group.buf_cache_count = 4;
439 	MOCK_SET(spdk_mempool_get, (void *)0x2000);
440 	reset_nvmf_rdma_request(&rdma_req);
441 	sgl->keyed.length = rtransport.transport.opts.io_unit_size * 4;
442 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
443 
444 	SPDK_CU_ASSERT_FATAL(rc == 0);
445 	CU_ASSERT(rdma_req.req.data_from_pool == true);
446 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
447 	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
448 			~NVMF_DATA_BUFFER_MASK));
449 	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
450 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
451 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
452 	CU_ASSERT(group.group.buf_cache_count == 0);
453 	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
454 	for (i = 0; i < 4; i++) {
455 		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
456 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
457 				~NVMF_DATA_BUFFER_MASK));
458 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
459 	}
460 
461 	/* part 2: now that we have used the buffers from the cache, try again. We should get mempool buffers. */
462 	reset_nvmf_rdma_request(&rdma_req);
463 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
464 
465 	SPDK_CU_ASSERT_FATAL(rc == 0);
466 	CU_ASSERT(rdma_req.req.data_from_pool == true);
467 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
468 	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
469 	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
470 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
471 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
472 	CU_ASSERT(group.group.buf_cache_count == 0);
473 	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
474 	for (i = 0; i < 4; i++) {
475 		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
476 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
477 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
478 		CU_ASSERT(group.group.buf_cache_count == 0);
479 	}
480 
481 	/* part 3: half and half - 2 buffers from the pg cache, 2 from the mempool */
482 	group.group.buf_cache_count = 2;
483 
484 	for (i = 0; i < 2; i++) {
485 		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
486 	}
487 	reset_nvmf_rdma_request(&rdma_req);
488 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
489 
490 	SPDK_CU_ASSERT_FATAL(rc == 0);
491 	CU_ASSERT(rdma_req.req.data_from_pool == true);
492 	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
493 	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
494 			~NVMF_DATA_BUFFER_MASK));
495 	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
496 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
497 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
498 	CU_ASSERT(group.group.buf_cache_count == 0);
499 	for (i = 0; i < 2; i++) {
500 		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
501 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
502 				~NVMF_DATA_BUFFER_MASK));
503 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
504 	}
505 	for (i = 2; i < 4; i++) {
506 		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
507 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
508 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
509 	}
510 
511 	reset_nvmf_rdma_request(&rdma_req);
512 }
513 
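/* Helpers that build the minimal recv/request objects needed by the request
 * state machine tests below. */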
514 static struct spdk_nvmf_rdma_recv *
515 create_recv(struct spdk_nvmf_rdma_qpair *rqpair, enum spdk_nvme_nvm_opcode opc)
516 {
517 	struct spdk_nvmf_rdma_recv *rdma_recv;
518 	union nvmf_h2c_msg *cmd;
519 	struct spdk_nvme_sgl_descriptor *sgl;
520 
521 	rdma_recv = calloc(1, sizeof(*rdma_recv));
522 	rdma_recv->qpair = rqpair;
523 	cmd = calloc(1, sizeof(*cmd));
524 	rdma_recv->sgl[0].addr = (uintptr_t)cmd;
525 	cmd->nvme_cmd.opc = opc;
526 	sgl = &cmd->nvme_cmd.dptr.sgl1;
527 	sgl->keyed.key = 0xEEEE;
528 	sgl->address = 0xFFFF;
529 	sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
530 	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
531 	sgl->keyed.length = 1;
532 
533 	return rdma_recv;
534 }
535 
536 static void
537 free_recv(struct spdk_nvmf_rdma_recv *rdma_recv)
538 {
539 	free((void *)rdma_recv->sgl[0].addr);
540 	free(rdma_recv);
541 }
542 
543 static struct spdk_nvmf_rdma_request *
544 create_req(struct spdk_nvmf_rdma_qpair *rqpair,
545 	   struct spdk_nvmf_rdma_recv *rdma_recv)
546 {
547 	struct spdk_nvmf_rdma_request *rdma_req;
548 	union nvmf_c2h_msg *cpl;
549 
550 	rdma_req = calloc(1, sizeof(*rdma_req));
551 	rdma_req->recv = rdma_recv;
552 	rdma_req->req.qpair = &rqpair->qpair;
553 	rdma_req->state = RDMA_REQUEST_STATE_NEW;
554 	rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
555 	rdma_req->data.wr.sg_list = rdma_req->data.sgl;
556 	cpl = calloc(1, sizeof(*cpl));
557 	rdma_req->rsp.sgl[0].addr = (uintptr_t)cpl;
558 	rdma_req->req.rsp = cpl;
559 
560 	return rdma_req;
561 }
562 
563 static void
564 free_req(struct spdk_nvmf_rdma_request *rdma_req)
565 {
566 	free((void *)rdma_req->rsp.sgl[0].addr);
567 	free(rdma_req);
568 }
569 
570 static void
571 qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
572 	    struct spdk_nvmf_rdma_poller *poller,
573 	    struct spdk_nvmf_rdma_device *device,
574 	    struct spdk_nvmf_rdma_resources *resources,
575 	    struct spdk_nvmf_transport *transport)
576 {
577 	memset(rqpair, 0, sizeof(*rqpair));
578 	STAILQ_INIT(&rqpair->pending_rdma_write_queue);
579 	STAILQ_INIT(&rqpair->pending_rdma_read_queue);
580 	rqpair->poller = poller;
581 	rqpair->device = device;
582 	rqpair->resources = resources;
583 	rqpair->qpair.qid = 1;
584 	rqpair->ibv_state = IBV_QPS_RTS;
585 	rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
586 	rqpair->max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
587 	rqpair->max_send_depth = 16;
588 	rqpair->max_read_depth = 16;
589 	rqpair->qpair.transport = transport;
590 }
591 
592 static void
593 poller_reset(struct spdk_nvmf_rdma_poller *poller,
594 	     struct spdk_nvmf_rdma_poll_group *group)
595 {
596 	memset(poller, 0, sizeof(*poller));
597 	STAILQ_INIT(&poller->qpairs_pending_recv);
598 	STAILQ_INIT(&poller->qpairs_pending_send);
599 	poller->group = group;
600 }
601 
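/* Walks requests through the nvmf_rdma_request_process() state machine: a single
 * READ, a single WRITE, two batched WRITEs and an invalid (bidirectional) opcode. */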
602 static void
603 test_spdk_nvmf_rdma_request_process(void)
604 {
605 	struct spdk_nvmf_rdma_transport rtransport = {};
606 	struct spdk_nvmf_rdma_poll_group group = {};
607 	struct spdk_nvmf_rdma_poller poller = {};
608 	struct spdk_nvmf_rdma_device device = {};
609 	struct spdk_nvmf_rdma_resources resources = {};
610 	struct spdk_nvmf_rdma_qpair rqpair = {};
611 	struct spdk_nvmf_rdma_recv *rdma_recv;
612 	struct spdk_nvmf_rdma_request *rdma_req;
613 	bool progress;
614 
615 	STAILQ_INIT(&group.group.buf_cache);
616 	STAILQ_INIT(&group.group.pending_buf_queue);
617 	group.group.buf_cache_size = 0;
618 	group.group.buf_cache_count = 0;
619 	poller_reset(&poller, &group);
620 	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
621 
622 	rtransport.transport.opts = g_rdma_ut_transport_opts;
623 	rtransport.transport.data_buf_pool = spdk_mempool_create("test_data_pool", 16, 128, 0, 0);
624 	rtransport.data_wr_pool = spdk_mempool_create("test_wr_pool", 128,
625 				  sizeof(struct spdk_nvmf_rdma_request_data),
626 				  0, 0);
627 	MOCK_CLEAR(spdk_mempool_get);
628 
629 	device.attr.device_cap_flags = 0;
630 	device.map = (void *)0x0;
631 
632 	/* Test 1: single SGL READ request */
633 	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_READ);
634 	rdma_req = create_req(&rqpair, rdma_recv);
635 	rqpair.current_recv_depth = 1;
636 	/* NEW -> EXECUTING */
637 	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
638 	CU_ASSERT(progress == true);
639 	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
640 	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
641 	/* EXECUTED -> TRANSFERRING_C2H */
642 	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
643 	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
644 	CU_ASSERT(progress == true);
645 	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
646 	CU_ASSERT(rdma_req->recv == NULL);
647 	/* COMPLETED -> FREE */
648 	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
649 	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
650 	CU_ASSERT(progress == true);
651 	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);
652 
653 	free_recv(rdma_recv);
654 	free_req(rdma_req);
655 	poller_reset(&poller, &group);
656 	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
657 
658 	/* Test 2: single SGL WRITE request */
659 	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
660 	rdma_req = create_req(&rqpair, rdma_recv);
661 	rqpair.current_recv_depth = 1;
662 	/* NEW -> TRANSFERRING_H2C */
663 	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
664 	CU_ASSERT(progress == true);
665 	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
666 	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
667 	STAILQ_INIT(&poller.qpairs_pending_send);
668 	/* READY_TO_EXECUTE -> EXECUTING */
669 	rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
670 	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
671 	CU_ASSERT(progress == true);
672 	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
673 	/* EXECUTED -> COMPLETING */
674 	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
675 	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
676 	CU_ASSERT(progress == true);
677 	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
678 	CU_ASSERT(rdma_req->recv == NULL);
679 	/* COMPLETED -> FREE */
680 	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
681 	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
682 	CU_ASSERT(progress == true);
683 	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);
684 
685 	free_recv(rdma_recv);
686 	free_req(rdma_req);
687 	poller_reset(&poller, &group);
688 	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
689 
690 	/* Test 3: WRITE+WRITE ibv_send batching */
691 	{
692 		struct spdk_nvmf_rdma_recv *recv1, *recv2;
693 		struct spdk_nvmf_rdma_request *req1, *req2;
694 		recv1 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
695 		req1 = create_req(&rqpair, recv1);
696 		recv2 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
697 		req2 = create_req(&rqpair, recv2);
698 
699 		/* WRITE 1: NEW -> TRANSFERRING_H2C */
700 		rqpair.current_recv_depth = 1;
701 		nvmf_rdma_request_process(&rtransport, req1);
702 		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
703 
704 		/* WRITE 2: NEW -> TRANSFERRING_H2C */
705 		rqpair.current_recv_depth = 2;
706 		nvmf_rdma_request_process(&rtransport, req2);
707 		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
708 
709 		STAILQ_INIT(&poller.qpairs_pending_send);
710 
711 		/* WRITE 1 completes before WRITE 2 has finished RDMA reading */
712 		/* WRITE 1: READY_TO_EXECUTE -> EXECUTING */
713 		req1->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
714 		nvmf_rdma_request_process(&rtransport, req1);
715 		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_EXECUTING);
716 		/* WRITE 1: EXECUTED -> COMPLETING */
717 		req1->state = RDMA_REQUEST_STATE_EXECUTED;
718 		nvmf_rdma_request_process(&rtransport, req1);
719 		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
720 		STAILQ_INIT(&poller.qpairs_pending_send);
721 		/* WRITE 1: COMPLETED -> FREE */
722 		req1->state = RDMA_REQUEST_STATE_COMPLETED;
723 		nvmf_rdma_request_process(&rtransport, req1);
724 		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_FREE);
725 
726 		/* Now WRITE 2 has finished reading and completes;
727 		 * walk it through EXECUTING, COMPLETING and FREE as well */
728 		/* WRITE 2: READY_TO_EXECUTE -> EXECUTING */
729 		req2->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
730 		nvmf_rdma_request_process(&rtransport, req2);
731 		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_EXECUTING);
732 		/* WRITE 2: EXECUTED -> COMPLETING */
733 		req2->state = RDMA_REQUEST_STATE_EXECUTED;
734 		nvmf_rdma_request_process(&rtransport, req2);
735 		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
736 		STAILQ_INIT(&poller.qpairs_pending_send);
737 		/* WRITE 2: COMPLETED -> FREE */
738 		req2->state = RDMA_REQUEST_STATE_COMPLETED;
739 		nvmf_rdma_request_process(&rtransport, req2);
740 		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_FREE);
741 
742 		free_recv(recv1);
743 		free_req(req1);
744 		free_recv(recv2);
745 		free_req(req2);
746 		poller_reset(&poller, &group);
747 		qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
748 	}
749 
750 	/* Test 4: invalid command, check xfer type */
751 	{
752 		struct spdk_nvmf_rdma_recv *rdma_recv_inv;
753 		struct spdk_nvmf_rdma_request *rdma_req_inv;
754 		/* construct an opcode that specifies BIDIRECTIONAL transfer */
755 		uint8_t opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
756 
757 		rdma_recv_inv = create_recv(&rqpair, opc);
758 		rdma_req_inv = create_req(&rqpair, rdma_recv_inv);
759 
760 		/* NEW -> RDMA_REQUEST_STATE_COMPLETING */
761 		rqpair.current_recv_depth = 1;
762 		progress = nvmf_rdma_request_process(&rtransport, rdma_req_inv);
763 		CU_ASSERT(progress == true);
764 		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_COMPLETING);
765 		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
766 		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
767 
768 		/* RDMA_REQUEST_STATE_COMPLETED -> FREE */
769 		rdma_req_inv->state = RDMA_REQUEST_STATE_COMPLETED;
770 		nvmf_rdma_request_process(&rtransport, rdma_req_inv);
771 		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_FREE);
772 
773 		free_recv(rdma_recv_inv);
774 		free_req(rdma_req_inv);
775 		poller_reset(&poller, &group);
776 		qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
777 	}
778 
779 	spdk_mempool_free(rtransport.transport.data_buf_pool);
780 	spdk_mempool_free(rtransport.data_wr_pool);
781 }
782 
783 #define TEST_GROUPS_COUNT 5
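/* The scheduler assigns poll groups round-robin: admin qpairs (qid 0) advance
 * conn_sched.next_admin_pg, I/O qpairs advance conn_sched.next_io_pg. */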
784 static void
785 test_nvmf_rdma_get_optimal_poll_group(void)
786 {
787 	struct spdk_nvmf_rdma_transport rtransport = {};
788 	struct spdk_nvmf_transport *transport = &rtransport.transport;
789 	struct spdk_nvmf_rdma_qpair rqpair = {};
790 	struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
791 	struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
792 	struct spdk_nvmf_transport_poll_group *result;
793 	struct spdk_nvmf_poll_group group = {};
794 	uint32_t i;
795 
796 	rqpair.qpair.transport = transport;
797 	TAILQ_INIT(&rtransport.poll_groups);
798 
799 	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
800 		groups[i] = nvmf_rdma_poll_group_create(transport, NULL);
801 		CU_ASSERT(groups[i] != NULL);
802 		groups[i]->group = &group;
803 		rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
804 		groups[i]->transport = transport;
805 	}
806 	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[0]);
807 	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[0]);
808 
809 	/* Emulate connection of %TEST_GROUPS_COUNT% initiators - each creates 1 admin and 1 io qp */
810 	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
811 		rqpair.qpair.qid = 0;
812 		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
813 		CU_ASSERT(result == groups[i]);
814 		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
815 		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i]);
816 
817 		rqpair.qpair.qid = 1;
818 		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
819 		CU_ASSERT(result == groups[i]);
820 		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
821 		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
822 	}
823 	/* wrap around: admin/io pg point to the first pg.
824 	   Destroy all poll groups except for the last one */
825 	for (i = 0; i < TEST_GROUPS_COUNT - 1; i++) {
826 		nvmf_rdma_poll_group_destroy(groups[i]);
827 		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[i + 1]);
828 		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i + 1]);
829 	}
830 
831 	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
832 	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
833 
834 	/* Check that pointers to the next admin/io poll groups are not changed */
835 	rqpair.qpair.qid = 0;
836 	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
837 	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
838 	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
839 	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
840 
841 	rqpair.qpair.qid = 1;
842 	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
843 	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
844 	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
845 	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
846 
847 	/* Remove the last poll group, check that pointers are NULL */
848 	nvmf_rdma_poll_group_destroy(groups[TEST_GROUPS_COUNT - 1]);
849 	CU_ASSERT(rtransport.conn_sched.next_admin_pg == NULL);
850 	CU_ASSERT(rtransport.conn_sched.next_io_pg == NULL);
851 
852 	/* Request optimal poll group, result must be NULL */
853 	rqpair.qpair.qid = 0;
854 	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
855 	CU_ASSERT(result == NULL);
856 
857 	rqpair.qpair.qid = 1;
858 	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
859 	CU_ASSERT(result == NULL);
860 }
861 #undef TEST_GROUPS_COUNT
862 
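/* Same SGL parsing paths as above but with DIF metadata enabled: verifies how the
 * 512-byte data blocks are laid out into SGEs around the interleaved 8-byte metadata. */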
863 static void
864 test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
865 {
866 	struct spdk_nvmf_rdma_transport rtransport;
867 	struct spdk_nvmf_rdma_device device;
868 	struct spdk_nvmf_rdma_request rdma_req = {};
869 	struct spdk_nvmf_rdma_recv recv;
870 	struct spdk_nvmf_rdma_poll_group group;
871 	struct spdk_nvmf_rdma_qpair rqpair;
872 	struct spdk_nvmf_rdma_poller poller;
873 	union nvmf_c2h_msg cpl;
874 	union nvmf_h2c_msg cmd;
875 	struct spdk_nvme_sgl_descriptor *sgl;
876 	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
877 	char data_buffer[8192];
878 	struct spdk_nvmf_rdma_request_data *data = (struct spdk_nvmf_rdma_request_data *)data_buffer;
879 	char data2_buffer[8192];
880 	struct spdk_nvmf_rdma_request_data *data2 = (struct spdk_nvmf_rdma_request_data *)data2_buffer;
881 	const uint32_t data_bs = 512;
882 	const uint32_t md_size = 8;
883 	int rc, i;
884 	void *aligned_buffer;
885 
886 	data->wr.sg_list = data->sgl;
887 	STAILQ_INIT(&group.group.buf_cache);
888 	group.group.buf_cache_size = 0;
889 	group.group.buf_cache_count = 0;
890 	group.group.transport = &rtransport.transport;
891 	poller.group = &group;
892 	rqpair.poller = &poller;
893 	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
894 
895 	sgl = &cmd.nvme_cmd.dptr.sgl1;
896 	rdma_req.recv = &recv;
897 	rdma_req.req.cmd = &cmd;
898 	rdma_req.req.rsp = &cpl;
899 	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
900 	rdma_req.req.qpair = &rqpair.qpair;
901 	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
902 
903 	rtransport.transport.opts = g_rdma_ut_transport_opts;
904 	rtransport.data_wr_pool = NULL;
905 	rtransport.transport.data_buf_pool = NULL;
906 
907 	device.attr.device_cap_flags = 0;
908 	device.map = NULL;
909 	sgl->keyed.key = 0xEEEE;
910 	sgl->address = 0xFFFF;
911 	rdma_req.recv->buf = (void *)0xDDDD;
912 
913 	/* Test 1: sgl type: keyed data block subtype: address */
914 	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
915 	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
916 
917 	/* Part 1: simple I/O, one SGL smaller than the transport io unit size, block size 512 */
918 	MOCK_SET(spdk_mempool_get, (void *)0x2000);
919 	reset_nvmf_rdma_request(&rdma_req);
920 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
921 			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
922 			  0, 0, 0, 0, 0);
923 	rdma_req.req.dif_enabled = true;
924 	rtransport.transport.opts.io_unit_size = data_bs * 8;
925 	rdma_req.req.qpair->transport = &rtransport.transport;
926 	sgl->keyed.length = data_bs * 4;
927 
928 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
929 
930 	CU_ASSERT(rc == 0);
931 	CU_ASSERT(rdma_req.req.data_from_pool == true);
932 	CU_ASSERT(rdma_req.req.length == data_bs * 4);
933 	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
934 	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
935 	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
936 	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
937 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
938 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
939 	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
940 
941 	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
942 	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rdma_req.req.length);
943 	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
944 
945 	/* Part 2: simple I/O, one SGL equal to io unit size, io_unit_size is not aligned with md_size,
946 		block size 512 */
947 	MOCK_SET(spdk_mempool_get, (void *)0x2000);
948 	reset_nvmf_rdma_request(&rdma_req);
949 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
950 			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
951 			  0, 0, 0, 0, 0);
952 	rdma_req.req.dif_enabled = true;
953 	rtransport.transport.opts.io_unit_size = data_bs * 4;
954 	sgl->keyed.length = data_bs * 4;
955 
956 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
957 
958 	CU_ASSERT(rc == 0);
959 	CU_ASSERT(rdma_req.req.data_from_pool == true);
960 	CU_ASSERT(rdma_req.req.length == data_bs * 4);
961 	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
962 	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
963 	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
964 	CU_ASSERT(rdma_req.data.wr.num_sge == 5);
965 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
966 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
967 	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
968 
969 	for (i = 0; i < 3; ++i) {
970 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
971 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
972 		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
973 	}
974 	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
975 	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
976 	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == RDMA_UT_LKEY);
977 
978 	/* 2nd buffer consumed */
979 	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
980 	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
981 	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == RDMA_UT_LKEY);
982 
983 	/* Part 3: simple I/O, one SGL equal to the io unit size, io_unit_size is equal to block size 512 bytes */
984 	MOCK_SET(spdk_mempool_get, (void *)0x2000);
985 	reset_nvmf_rdma_request(&rdma_req);
986 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
987 			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
988 			  0, 0, 0, 0, 0);
989 	rdma_req.req.dif_enabled = true;
990 	rtransport.transport.opts.io_unit_size = data_bs;
991 	sgl->keyed.length = data_bs;
992 
993 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
994 
995 	CU_ASSERT(rc == 0);
996 	CU_ASSERT(rdma_req.req.data_from_pool == true);
997 	CU_ASSERT(rdma_req.req.length == data_bs);
998 	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
999 	CU_ASSERT(rdma_req.req.dif.elba_length == data_bs + md_size);
1000 	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
1001 	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
1002 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
1003 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
1004 	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
1005 
1006 	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
1007 	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs);
1008 	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
1009 
1010 	CU_ASSERT(rdma_req.req.iovcnt == 2);
1011 	CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)((unsigned long)0x2000));
1012 	CU_ASSERT(rdma_req.req.iov[0].iov_len == data_bs);
1013 	/* 2nd buffer consumed for metadata */
1014 	CU_ASSERT(rdma_req.req.iov[1].iov_base == (void *)((unsigned long)0x2000));
1015 	CU_ASSERT(rdma_req.req.iov[1].iov_len == md_size);
1016 
1017 	/* Part 4: simple I/O, one SGL equal to the io unit size, io_unit_size is aligned with md_size,
1018 	   block size 512 */
1019 	MOCK_SET(spdk_mempool_get, (void *)0x2000);
1020 	reset_nvmf_rdma_request(&rdma_req);
1021 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
1022 			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
1023 			  0, 0, 0, 0, 0);
1024 	rdma_req.req.dif_enabled = true;
1025 	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
1026 	sgl->keyed.length = data_bs * 4;
1027 
1028 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
1029 
1030 	CU_ASSERT(rc == 0);
1031 	CU_ASSERT(rdma_req.req.data_from_pool == true);
1032 	CU_ASSERT(rdma_req.req.length == data_bs * 4);
1033 	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
1034 	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
1035 	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
1036 	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
1037 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
1038 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
1039 	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
1040 
1041 	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
1042 	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rdma_req.req.length);
1043 	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
1044 
1045 	/* Part 5: simple I/O, one SGL equal to 2x io unit size, io_unit_size is aligned with md_size,
1046 	   block size 512 */
1047 	MOCK_SET(spdk_mempool_get, (void *)0x2000);
1048 	reset_nvmf_rdma_request(&rdma_req);
1049 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
1050 			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
1051 			  0, 0, 0, 0, 0);
1052 	rdma_req.req.dif_enabled = true;
1053 	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 2;
1054 	sgl->keyed.length = data_bs * 4;
1055 
1056 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
1057 
1058 	CU_ASSERT(rc == 0);
1059 	CU_ASSERT(rdma_req.req.data_from_pool == true);
1060 	CU_ASSERT(rdma_req.req.length == data_bs * 4);
1061 	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
1062 	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
1063 	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
1064 	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
1065 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
1066 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
1067 	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
1068 
1069 	for (i = 0; i < 2; ++i) {
1070 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
1071 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs * 2);
1072 	}
1073 
1074 	/* Part 6: simple I/O, one SGL larger than the transport io unit size, io_unit_size is not aligned to md_size,
1075 	   block size 512 */
1076 	MOCK_SET(spdk_mempool_get, (void *)0x2000);
1077 	reset_nvmf_rdma_request(&rdma_req);
1078 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
1079 			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
1080 			  0, 0, 0, 0, 0);
1081 	rdma_req.req.dif_enabled = true;
1082 	rtransport.transport.opts.io_unit_size = data_bs * 4;
1083 	sgl->keyed.length = data_bs * 6;
1084 
1085 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
1086 
1087 	CU_ASSERT(rc == 0);
1088 	CU_ASSERT(rdma_req.req.data_from_pool == true);
1089 	CU_ASSERT(rdma_req.req.length == data_bs * 6);
1090 	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
1091 	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 6);
1092 	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
1093 	CU_ASSERT(rdma_req.data.wr.num_sge == 7);
1094 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
1095 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
1096 	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
1097 
1098 	for (i = 0; i < 3; ++i) {
1099 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
1100 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
1101 		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
1102 	}
1103 	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
1104 	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
1105 	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == RDMA_UT_LKEY);
1106 
1107 	/* 2nd IO buffer consumed */
1108 	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
1109 	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
1110 	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == RDMA_UT_LKEY);
1111 
1112 	CU_ASSERT(rdma_req.data.wr.sg_list[5].addr == 0x2000 + 24 + md_size);
1113 	CU_ASSERT(rdma_req.data.wr.sg_list[5].length == 512);
1114 	CU_ASSERT(rdma_req.data.wr.sg_list[5].lkey == RDMA_UT_LKEY);
1115 
1116 	CU_ASSERT(rdma_req.data.wr.sg_list[6].addr == 0x2000 + 24 + 512 + md_size * 2);
1117 	CU_ASSERT(rdma_req.data.wr.sg_list[6].length == 512);
1118 	CU_ASSERT(rdma_req.data.wr.sg_list[6].lkey == RDMA_UT_LKEY);
1119 
1120 	/* Part 7: simple I/O, number of SGL entries exceeds the number of entries
1121 	   one WR can hold. Additional WR is chained */
1122 	MOCK_SET(spdk_mempool_get, data2_buffer);
1123 	aligned_buffer = (void *)((uintptr_t)(data2_buffer + NVMF_DATA_BUFFER_MASK) &
1124 				  ~NVMF_DATA_BUFFER_MASK);
1125 	reset_nvmf_rdma_request(&rdma_req);
1126 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
1127 			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
1128 			  0, 0, 0, 0, 0);
1129 	rdma_req.req.dif_enabled = true;
1130 	rtransport.transport.opts.io_unit_size = data_bs * 16;
1131 	sgl->keyed.length = data_bs * 16;
1132 
1133 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
1134 
1135 	CU_ASSERT(rc == 0);
1136 	CU_ASSERT(rdma_req.req.data_from_pool == true);
1137 	CU_ASSERT(rdma_req.req.length == data_bs * 16);
1138 	CU_ASSERT(rdma_req.req.iovcnt == 2);
1139 	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
1140 	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 16);
1141 	CU_ASSERT(rdma_req.req.data == aligned_buffer);
1142 	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
1143 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
1144 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
1145 
1146 	for (i = 0; i < 15; ++i) {
1147 		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)aligned_buffer + i * (data_bs + md_size));
1148 		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
1149 		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
1150 	}
1151 
1152 	/* 8192 - (512 + 8) * 15 = 392 */
1153 	CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)aligned_buffer + i * (data_bs + md_size));
1154 	CU_ASSERT(rdma_req.data.wr.sg_list[i].length == 392);
1155 	CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
1156 
1157 	/* additional wr from pool */
1158 	CU_ASSERT(rdma_req.data.wr.next == (void *)&data2->wr);
1159 	CU_ASSERT(rdma_req.data.wr.next->num_sge == 1);
1160 	CU_ASSERT(rdma_req.data.wr.next->next == &rdma_req.rsp.wr);
1161 	/* 2nd IO buffer */
1162 	CU_ASSERT(data2->wr.sg_list[0].addr == (uintptr_t)aligned_buffer);
1163 	CU_ASSERT(data2->wr.sg_list[0].length == 120);
1164 	CU_ASSERT(data2->wr.sg_list[0].lkey == RDMA_UT_LKEY);
1165 
1166 	/* Part 8: simple I/O, data with metadata does not fit into 1 io_buffer */
1167 	MOCK_SET(spdk_mempool_get, (void *)0x2000);
1168 	reset_nvmf_rdma_request(&rdma_req);
1169 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
1170 			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
1171 			  0, 0, 0, 0, 0);
1172 	rdma_req.req.dif_enabled = true;
1173 	rtransport.transport.opts.io_unit_size = 516;
1174 	sgl->keyed.length = data_bs * 2;
1175 
1176 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
1177 
1178 	CU_ASSERT(rc == 0);
1179 	CU_ASSERT(rdma_req.req.data_from_pool == true);
1180 	CU_ASSERT(rdma_req.req.length == data_bs * 2);
1181 	CU_ASSERT(rdma_req.req.iovcnt == 3);
1182 	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
1183 	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 2);
1184 	CU_ASSERT(rdma_req.req.data == (void *)0x2000);
1185 	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
1186 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
1187 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
1188 
1189 	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
1190 	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 512);
1191 	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);
1192 
1193 	/* 2nd IO buffer consumed, offset by 4 bytes because part of the metadata
1194 	  is located at the beginning of that buffer */
1195 	CU_ASSERT(rdma_req.data.wr.sg_list[1].addr == 0x2000 + 4);
1196 	CU_ASSERT(rdma_req.data.wr.sg_list[1].length == 512);
1197 	CU_ASSERT(rdma_req.data.wr.sg_list[1].lkey == RDMA_UT_LKEY);
1198 
1199 	/* Test 2: Multi SGL */
1200 	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
1201 	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
1202 	sgl->address = 0;
1203 	rdma_req.recv->buf = (void *)&sgl_desc;
1204 	MOCK_SET(spdk_mempool_get, data_buffer);
1205 	aligned_buffer = (void *)((uintptr_t)(data_buffer + NVMF_DATA_BUFFER_MASK) &
1206 				  ~NVMF_DATA_BUFFER_MASK);
1207 
1208 	/* part 1: 2 segments each with 1 wr. io_unit_size is aligned with data_bs + md_size */
1209 	reset_nvmf_rdma_request(&rdma_req);
1210 	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
1211 			  SPDK_DIF_TYPE1,
1212 			  SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK, 0, 0, 0, 0, 0);
1213 	rdma_req.req.dif_enabled = true;
1214 	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
1215 	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
1216 
1217 	for (i = 0; i < 2; i++) {
1218 		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
1219 		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
1220 		sgl_desc[i].keyed.length = data_bs * 4;
1221 		sgl_desc[i].address = 0x4000 + i * data_bs * 4;
1222 		sgl_desc[i].keyed.key = 0x44;
1223 	}
1224 
1225 	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
1226 
1227 	CU_ASSERT(rc == 0);
1228 	CU_ASSERT(rdma_req.req.data_from_pool == true);
1229 	CU_ASSERT(rdma_req.req.length == data_bs * 4 * 2);
1230 	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
1231 	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4 * 2);
1232 	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
1233 	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == (uintptr_t)(aligned_buffer));
1234 	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs * 4);
1235 
1236 	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
1237 	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
1238 	CU_ASSERT(rdma_req.data.wr.next == &data->wr);
1239 	CU_ASSERT(data->wr.wr.rdma.rkey == 0x44);
1240 	CU_ASSERT(data->wr.wr.rdma.remote_addr == 0x4000 + data_bs * 4);
1241 	CU_ASSERT(data->wr.num_sge == 1);
1242 	CU_ASSERT(data->wr.sg_list[0].addr == (uintptr_t)(aligned_buffer));
1243 	CU_ASSERT(data->wr.sg_list[0].length == data_bs * 4);
1244 
1245 	CU_ASSERT(data->wr.next == &rdma_req.rsp.wr);
1246 	reset_nvmf_rdma_request(&rdma_req);
1247 }
1248 
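/* Verifies that nvmf_rdma_opts_init() populates the transport options with the
 * SPDK_NVMF_RDMA_* defaults. */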
1249 static void
1250 test_nvmf_rdma_opts_init(void)
1251 {
1252 	struct spdk_nvmf_transport_opts	opts = {};
1253 
1254 	nvmf_rdma_opts_init(&opts);
1255 	CU_ASSERT(opts.max_queue_depth == SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH);
1256 	CU_ASSERT(opts.max_qpairs_per_ctrlr ==	SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR);
1257 	CU_ASSERT(opts.in_capsule_data_size ==	SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE);
1258 	CU_ASSERT(opts.max_io_size == SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE);
1259 	CU_ASSERT(opts.io_unit_size == SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE);
1260 	CU_ASSERT(opts.max_aq_depth == SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH);
1261 	CU_ASSERT(opts.num_shared_buffers == SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS);
1262 	CU_ASSERT(opts.buf_cache_size == SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE);
1263 	CU_ASSERT(opts.dif_insert_or_strip == SPDK_NVMF_RDMA_DIF_INSERT_OR_STRIP);
1264 	CU_ASSERT(opts.abort_timeout_sec == SPDK_NVMF_RDMA_DEFAULT_ABORT_TIMEOUT_SEC);
1265 	CU_ASSERT(opts.transport_specific == NULL);
1266 }
1267 
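/* Verifies that nvmf_rdma_request_free_data() returns chained WRs to the
 * data_wr_pool and clears the request's own SGE count. */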
1268 static void
1269 test_nvmf_rdma_request_free_data(void)
1270 {
1271 	struct spdk_nvmf_rdma_request rdma_req = {};
1272 	struct spdk_nvmf_rdma_transport rtransport = {};
1273 	struct spdk_nvmf_rdma_request_data *next_request_data = NULL;
1274 
1275 	MOCK_CLEAR(spdk_mempool_get);
1276 	rtransport.data_wr_pool = spdk_mempool_create("spdk_nvmf_rdma_wr_data",
1277 				  SPDK_NVMF_MAX_SGL_ENTRIES,
1278 				  sizeof(struct spdk_nvmf_rdma_request_data),
1279 				  SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
1280 				  SPDK_ENV_SOCKET_ID_ANY);
1281 	next_request_data = spdk_mempool_get(rtransport.data_wr_pool);
1282 	SPDK_CU_ASSERT_FATAL(((struct test_mempool *)rtransport.data_wr_pool)->count ==
1283 			     SPDK_NVMF_MAX_SGL_ENTRIES - 1);
1284 	next_request_data->wr.wr_id = 1;
1285 	next_request_data->wr.num_sge = 2;
1286 	next_request_data->wr.next = NULL;
1287 	rdma_req.data.wr.next = &next_request_data->wr;
1288 	rdma_req.data.wr.wr_id = 1;
1289 	rdma_req.data.wr.num_sge = 2;
1290 
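	/* Freeing the request should return the chained WR's data to the pool and clear num_sge. */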
1291 	nvmf_rdma_request_free_data(&rdma_req, &rtransport);
1292 	/* Check that next_request_data was returned to the memory pool */
1293 	CU_ASSERT(((struct test_mempool *)rtransport.data_wr_pool)->count == SPDK_NVMF_MAX_SGL_ENTRIES);
1294 	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
1295 
1296 	spdk_mempool_free(rtransport.data_wr_pool);
1297 }
1298 
1299 static void
1300 test_nvmf_rdma_update_ibv_state(void)
1301 {
1302 	struct spdk_nvmf_rdma_qpair rqpair = {};
1303 	struct spdk_rdma_qp rdma_qp = {};
1304 	struct ibv_qp qp = {};
1305 	int rc = 0;
1306 
1307 	rqpair.rdma_qp = &rdma_qp;
1308 
1309 	/* Case 1: Failed to get updated RDMA queue pair state */
1310 	rqpair.ibv_state = IBV_QPS_INIT;
1311 	rqpair.rdma_qp->qp = NULL;
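	/* With no ibv_qp the state query cannot succeed, so nvmf_rdma_update_ibv_state()
	 * reports the invalid-state sentinel IBV_QPS_ERR + 1. */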
1312 
1313 	rc = nvmf_rdma_update_ibv_state(&rqpair);
1314 	CU_ASSERT(rc == IBV_QPS_ERR + 1);
1315 
1316 	/* Case 2: Query reports an unrecognized (out-of-range) state */
1317 	rqpair.rdma_qp->qp = &qp;
1318 	qp.state = IBV_QPS_ERR;
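	/* The ibv_query_qp() stub is expected to report an out-of-range state (10) for a
	 * qp in the ERR state; that bogus value should be stored as-is. */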
1319 	rc = nvmf_rdma_update_ibv_state(&rqpair);
1320 	CU_ASSERT(rqpair.ibv_state == 10);
1321 	CU_ASSERT(rc == IBV_QPS_ERR + 1);
1322 
1323 	/* Case 3: Pass */
1324 	qp.state = IBV_QPS_INIT;
1325 	rc = nvmf_rdma_update_ibv_state(&rqpair);
1326 	CU_ASSERT(rqpair.ibv_state == IBV_QPS_INIT);
1327 	CU_ASSERT(rc == IBV_QPS_INIT);
1328 }
1329 
1330 static void
1331 test_nvmf_rdma_resources_create(void)
1332 {
1333 	static struct spdk_nvmf_rdma_resources *rdma_resource;
1334 	struct spdk_nvmf_rdma_resource_opts opts = {};
1335 	struct spdk_nvmf_rdma_qpair qpair = {};
1336 	struct spdk_nvmf_rdma_recv *recv = NULL;
1337 	struct spdk_nvmf_rdma_request *req = NULL;
1338 	const int DEPTH = 128;
1339 
1340 	opts.max_queue_depth = DEPTH;
1341 	opts.in_capsule_data_size = 4096;
1342 	opts.shared = true;
1343 	opts.qpair = &qpair;
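	/* Shared resources with DEPTH entries and 4096-byte in-capsule buffers; each recv
	 * WR should therefore carry two SGEs (command + in-capsule data). */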
1344 
1345 	rdma_resource = nvmf_rdma_resources_create(&opts);
1346 	CU_ASSERT(rdma_resource != NULL);
1347 	/* Just check the first and last entries */
1348 	recv = &rdma_resource->recvs[0];
1349 	req = &rdma_resource->reqs[0];
1350 	CU_ASSERT(recv->rdma_wr.type == RDMA_WR_TYPE_RECV);
1351 	CU_ASSERT((uintptr_t)recv->buf == (uintptr_t)(rdma_resource->bufs));
1352 	CU_ASSERT(recv->sgl[0].addr == (uintptr_t)&rdma_resource->cmds[0]);
1353 	CU_ASSERT(recv->sgl[0].length == sizeof(rdma_resource->cmds[0]));
1354 	CU_ASSERT(recv->sgl[0].lkey == RDMA_UT_LKEY);
1355 	CU_ASSERT(recv->wr.num_sge == 2);
1356 	CU_ASSERT(recv->wr.wr_id == (uintptr_t)&rdma_resource->recvs[0].rdma_wr);
1357 	CU_ASSERT(recv->wr.sg_list == rdma_resource->recvs[0].sgl);
1358 	CU_ASSERT(req->req.rsp == &rdma_resource->cpls[0]);
1359 	CU_ASSERT(req->rsp.sgl[0].addr == (uintptr_t)&rdma_resource->cpls[0]);
1360 	CU_ASSERT(req->rsp.sgl[0].length == sizeof(rdma_resource->cpls[0]));
1361 	CU_ASSERT(req->rsp.sgl[0].lkey == RDMA_UT_LKEY);
1362 	CU_ASSERT(req->rsp.rdma_wr.type == RDMA_WR_TYPE_SEND);
1363 	CU_ASSERT(req->rsp.wr.wr_id == (uintptr_t)&rdma_resource->reqs[0].rsp.rdma_wr);
1364 	CU_ASSERT(req->rsp.wr.next == NULL);
1365 	CU_ASSERT(req->rsp.wr.opcode == IBV_WR_SEND);
1366 	CU_ASSERT(req->rsp.wr.send_flags == IBV_SEND_SIGNALED);
1367 	CU_ASSERT(req->rsp.wr.sg_list == rdma_resource->reqs[0].rsp.sgl);
1368 	CU_ASSERT(req->rsp.wr.num_sge == NVMF_DEFAULT_RSP_SGE);
1369 	CU_ASSERT(req->data.rdma_wr.type == RDMA_WR_TYPE_DATA);
1370 	CU_ASSERT(req->data.wr.wr_id == (uintptr_t)&rdma_resource->reqs[0].data.rdma_wr);
1371 	CU_ASSERT(req->data.wr.next == NULL);
1372 	CU_ASSERT(req->data.wr.send_flags == IBV_SEND_SIGNALED);
1373 	CU_ASSERT(req->data.wr.sg_list == rdma_resource->reqs[0].data.sgl);
1374 	CU_ASSERT(req->data.wr.num_sge == SPDK_NVMF_MAX_SGL_ENTRIES);
1375 	CU_ASSERT(req->state == RDMA_REQUEST_STATE_FREE);
1376 
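	/* The last entry should be laid out identically, with its buffer offset by
	 * (DEPTH - 1) * in_capsule_data_size. */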
1377 	recv = &rdma_resource->recvs[DEPTH - 1];
1378 	req = &rdma_resource->reqs[DEPTH - 1];
1379 	CU_ASSERT(recv->rdma_wr.type == RDMA_WR_TYPE_RECV);
1380 	CU_ASSERT((uintptr_t)recv->buf == (uintptr_t)(rdma_resource->bufs +
1381 			(DEPTH - 1) * 4096));
1382 	CU_ASSERT(recv->sgl[0].addr == (uintptr_t)&rdma_resource->cmds[DEPTH - 1]);
1383 	CU_ASSERT(recv->sgl[0].length == sizeof(rdma_resource->cmds[DEPTH - 1]));
1384 	CU_ASSERT(recv->sgl[0].lkey == RDMA_UT_LKEY);
1385 	CU_ASSERT(recv->wr.num_sge == 2);
1386 	CU_ASSERT(recv->wr.wr_id == (uintptr_t)&rdma_resource->recvs[DEPTH - 1].rdma_wr);
1387 	CU_ASSERT(recv->wr.sg_list == rdma_resource->recvs[DEPTH - 1].sgl);
1388 	CU_ASSERT(req->req.rsp == &rdma_resource->cpls[DEPTH - 1]);
1389 	CU_ASSERT(req->rsp.sgl[0].addr == (uintptr_t)&rdma_resource->cpls[DEPTH - 1]);
1390 	CU_ASSERT(req->rsp.sgl[0].length == sizeof(rdma_resource->cpls[DEPTH - 1]));
1391 	CU_ASSERT(req->rsp.sgl[0].lkey == RDMA_UT_LKEY);
1392 	CU_ASSERT(req->rsp.rdma_wr.type == RDMA_WR_TYPE_SEND);
1393 	CU_ASSERT(req->rsp.wr.wr_id == (uintptr_t)
1394 		  &req->rsp.rdma_wr);
1395 	CU_ASSERT(req->rsp.wr.next == NULL);
1396 	CU_ASSERT(req->rsp.wr.opcode == IBV_WR_SEND);
1397 	CU_ASSERT(req->rsp.wr.send_flags == IBV_SEND_SIGNALED);
1398 	CU_ASSERT(req->rsp.wr.sg_list == rdma_resource->reqs[DEPTH - 1].rsp.sgl);
1399 	CU_ASSERT(req->rsp.wr.num_sge == NVMF_DEFAULT_RSP_SGE);
1400 	CU_ASSERT(req->data.rdma_wr.type == RDMA_WR_TYPE_DATA);
1401 	CU_ASSERT(req->data.wr.wr_id == (uintptr_t)
1402 		  &req->data.rdma_wr);
1403 	CU_ASSERT(req->data.wr.next == NULL);
1404 	CU_ASSERT(req->data.wr.send_flags == IBV_SEND_SIGNALED);
1405 	CU_ASSERT(req->data.wr.sg_list == rdma_resource->reqs[DEPTH - 1].data.sgl);
1406 	CU_ASSERT(req->data.wr.num_sge == SPDK_NVMF_MAX_SGL_ENTRIES);
1407 	CU_ASSERT(req->state == RDMA_REQUEST_STATE_FREE);
1408 
1409 	nvmf_rdma_resources_destroy(rdma_resource);
1410 }
1411 
1412 static void
1413 test_nvmf_rdma_qpair_compare(void)
1414 {
1415 	struct spdk_nvmf_rdma_qpair rqpair1 = {}, rqpair2 = {};
1416 
1417 	rqpair1.qp_num = 0;
1418 	rqpair2.qp_num = UINT32_MAX;
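	/* The comparator orders qpairs strictly by qp_num. */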
1419 
1420 	CU_ASSERT(nvmf_rdma_qpair_compare(&rqpair1, &rqpair2) < 0);
1421 	CU_ASSERT(nvmf_rdma_qpair_compare(&rqpair2, &rqpair1) > 0);
1422 }
1423 
1424 static void
1425 test_nvmf_rdma_resize_cq(void)
1426 {
1427 	int rc = -1;
1428 	int tnum_wr = 0;
1429 	int tnum_cqe = 0;
1430 	struct spdk_nvmf_rdma_qpair rqpair = {};
1431 	struct spdk_nvmf_rdma_poller rpoller = {};
1432 	struct spdk_nvmf_rdma_device rdevice = {};
1433 	struct ibv_context ircontext = {};
1434 	struct ibv_device idevice = {};
1435 
1436 	rdevice.context = &ircontext;
1437 	rqpair.poller = &rpoller;
1438 	ircontext.device = &idevice;
1439 
1440 	/* Test1: Current capacity supports the required size; no resize is needed. */
1441 	rpoller.required_num_wr = 10;
1442 	rpoller.num_cqe = 20;
1443 	rqpair.max_queue_depth = 2;
1444 	tnum_wr = rpoller.required_num_wr;
1445 	tnum_cqe = rpoller.num_cqe;
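	/* Each added qpair raises required_num_wr by MAX_WR_PER_QP(max_queue_depth);
	 * num_cqe (20) still covers it, so the CQ is left alone. */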
1446 
1447 	rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
1448 	CU_ASSERT(rc == 0);
1449 	CU_ASSERT(rpoller.required_num_wr == 10 + MAX_WR_PER_QP(rqpair.max_queue_depth));
1450 	CU_ASSERT(rpoller.required_num_wr > tnum_wr);
1451 	CU_ASSERT(rpoller.num_cqe == tnum_cqe);
1452 
1453 	/* Test2: iWARP doesn't support CQ resize. */
1454 	tnum_wr = rpoller.required_num_wr;
1455 	tnum_cqe = rpoller.num_cqe;
1456 	idevice.transport_type = IBV_TRANSPORT_IWARP;
1457 
1458 	rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
1459 	CU_ASSERT(rc == -1);
1460 	CU_ASSERT(rpoller.required_num_wr == tnum_wr);
1461 	CU_ASSERT(rpoller.num_cqe == tnum_cqe);
1462 
1463 
1464 	/* Test3: RDMA CQE requirement exceeds device max_cqe limitation. */
1465 	tnum_wr = rpoller.required_num_wr;
1466 	tnum_cqe = rpoller.num_cqe;
1467 	idevice.transport_type = IBV_TRANSPORT_UNKNOWN;
1468 	rdevice.attr.max_cqe = 3;
1469 
1470 	rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
1471 	CU_ASSERT(rc == -1);
1472 	CU_ASSERT(rpoller.required_num_wr == tnum_wr);
1473 	CU_ASSERT(rpoller.num_cqe == tnum_cqe);
1474 
1475 	/* Test4: RDMA CQ resize failed. */
1476 	tnum_wr = rpoller.required_num_wr;
1477 	tnum_cqe = rpoller.num_cqe;
1478 	idevice.transport_type = IBV_TRANSPORT_IB;
1479 	rdevice.attr.max_cqe = 30;
1480 	MOCK_SET(ibv_resize_cq, -1);
1481 
1482 	rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
1483 	CU_ASSERT(rc == -1);
1484 	CU_ASSERT(rpoller.required_num_wr == tnum_wr);
1485 	CU_ASSERT(rpoller.num_cqe == tnum_cqe);
1486 
1487 	/* Test5: RDMA CQ resize success. rsize = MIN(MAX(num_cqe * 2, required_num_wr), device->attr.max_cqe). */
1488 	tnum_wr = rpoller.required_num_wr;
1489 	tnum_cqe = rpoller.num_cqe;
1490 	MOCK_SET(ibv_resize_cq, 0);
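	/* required_num_wr (now 18 + MAX_WR_PER_QP(2)) exceeds the current num_cqe of 20,
	 * so the CQ is resized per the formula above and capped at max_cqe (30). */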
1491 
1492 	rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
1493 	CU_ASSERT(rc == 0);
1494 	CU_ASSERT(rpoller.num_cqe == 30);
1495 	CU_ASSERT(rpoller.required_num_wr == 18 + MAX_WR_PER_QP(rqpair.max_queue_depth));
1496 	CU_ASSERT(rpoller.required_num_wr > tnum_wr);
1497 	CU_ASSERT(rpoller.num_cqe > tnum_cqe);
1498 }
1499 
1500 int
1501 main(int argc, char **argv)
1502 {
1503 	CU_pSuite	suite = NULL;
1504 	unsigned int	num_failures;
1505 
1506 	CU_set_error_action(CUEA_ABORT);
1507 	CU_initialize_registry();
1508 
1509 	suite = CU_add_suite("nvmf", NULL, NULL);
1510 
1511 	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl);
1512 	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_process);
1513 	CU_ADD_TEST(suite, test_nvmf_rdma_get_optimal_poll_group);
1514 	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl_with_md);
1515 	CU_ADD_TEST(suite, test_nvmf_rdma_opts_init);
1516 	CU_ADD_TEST(suite, test_nvmf_rdma_request_free_data);
1517 	CU_ADD_TEST(suite, test_nvmf_rdma_update_ibv_state);
1518 	CU_ADD_TEST(suite, test_nvmf_rdma_resources_create);
1519 	CU_ADD_TEST(suite, test_nvmf_rdma_qpair_compare);
1520 	CU_ADD_TEST(suite, test_nvmf_rdma_resize_cq);
1521 
1522 	CU_basic_set_mode(CU_BRM_VERBOSE);
1523 	CU_basic_run_tests();
1524 	num_failures = CU_get_number_of_failures();
1525 	CU_cleanup_registry();
1526 	return num_failures;
1527 }
1528