/* xref: /spdk/test/unit/lib/nvmf/rdma.c/rdma_ut.c (revision 5a7d428d0fa7ace275e7dc5fe97f6a2ae6ad012d) */
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019, 2021 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "common/lib/test_rdma.c"
#include "nvmf/rdma.c"
#include "nvmf/transport.c"

#define RDMA_UT_UNITS_IN_MAX_IO 16

struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
	.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
	.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
	.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
	.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
	.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
	.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
	.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
};

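/*
 * rdma.c and transport.c are compiled directly into this test, so every
 * external symbol they reference must be resolved here. DEFINE_STUB()
 * (from SPDK's mock helpers, pulled in via the common test includes)
 * generates a mock whose return value defaults to the given constant and
 * can be overridden with MOCK_SET(); DEFINE_STUB_V() is the
 * void-returning variant.
 */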
SPDK_LOG_REGISTER_COMPONENT(nvmf)
DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size, uint64_t translation), 0);
DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size), 0);
DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
		nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid), 0);
DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type, uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));

DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
		struct spdk_nvmf_ctrlr_data *cdata));
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
		struct spdk_dif_ctx *dif_ctx), false);
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);

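/*
 * The two helpers below are implemented for real rather than stubbed,
 * because the transport code under test depends on their actual behavior
 * when handling transport IDs.
 */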
const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Store the official trstring as the uppercased version of the input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

static void
reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
{
	int i;

	rdma_req->req.length = 0;
	rdma_req->req.data_from_pool = false;
	rdma_req->req.data = NULL;
	rdma_req->data.wr.num_sge = 0;
	rdma_req->data.wr.wr.rdma.remote_addr = 0;
	rdma_req->data.wr.wr.rdma.rkey = 0;
	memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));

	for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
		rdma_req->req.iov[i].iov_base = 0;
		rdma_req->req.iov[i].iov_len = 0;
		rdma_req->req.buffers[i] = 0;
		rdma_req->data.wr.sg_list[i].addr = 0;
		rdma_req->data.wr.sg_list[i].length = 0;
		rdma_req->data.wr.sg_list[i].lkey = 0;
	}
	rdma_req->req.iovcnt = 0;
}

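/*
 * Exercises nvmf_rdma_request_parse_sgl() for the SGL layouts the RDMA
 * transport accepts: a keyed data block (data pulled/pushed over RDMA
 * using the rkey), an in-capsule data block addressed by offset, and a
 * last-segment SGL carrying multiple keyed descriptors.
 */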
static void
test_spdk_nvmf_rdma_request_parse_sgl(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvmf_transport_pg_cache_buf bufs[4];
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	int rc, i;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block, subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;

	device.map = (void *)0x0;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);

	/* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}

	/* Part 3: simple I/O, one SGL larger than the transport max io size */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 4: Pretend there are no buffer pools */
	MOCK_SET(spdk_mempool_get, NULL);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == false);
	CU_ASSERT(rdma_req.req.data == NULL);
	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
	CU_ASSERT(rdma_req.req.buffers[0] == NULL);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);

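	/* For in-capsule data the SGL address field is an offset into the
	 * capsule, so the request's data pointer lands inside the recv
	 * buffer and no buffer is taken from the pool. */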
	rdma_req.recv->buf = (void *)0xDDDD;
	/* Test 2: sgl type: data block, subtype: offset (in-capsule data) */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;

	/* Part 1: Normal I/O that fits within the in-capsule data size, no offset */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
	CU_ASSERT(rdma_req.req.data_from_pool == false);

	/* Part 2: I/O offset + length too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = rtransport.transport.opts.in_capsule_data_size;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 3: I/O too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

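	/* A last-segment SGL points at an in-capsule array of keyed
	 * descriptors; each descriptor after the first needs an extra work
	 * request, drawn from rtransport.data_wr_pool (mocked below so that
	 * spdk_mempool_get() hands back the local "data" object). */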
	/* Test 3: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, &data);

	/* part 1: 2 segments, each with 1 wr. */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
		sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 2: 2 segments, each with 1 wr containing 8 SGEs */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
		sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 16);
	CU_ASSERT(rdma_req.data.wr.num_sge == 8);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
	CU_ASSERT(data.wr.num_sge == 8);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 3: 2 segments, one very large, one very small */
	reset_nvmf_rdma_request(&rdma_req);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.key = 0x44;
	}

	sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
				   rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[0].address = 0x4000;
	sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
			      rtransport.transport.opts.io_unit_size / 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 17);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	for (i = 0; i < 15; i++) {
		CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
	}
	CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
		  rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

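	/* Buffers are taken from the poll group cache first and fall back to
	 * the shared mempool once the cache is drained. Cached buffers are
	 * raw pg_cache_buf objects, so their usable start address is rounded
	 * up with NVMF_DATA_BUFFER_MASK. */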
	/* Test 4: use PG buffer cache */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;
	sgl->keyed.key = 0xEEEE;

	for (i = 0; i < 4; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}

	/* part 1: use the four buffers from the pg cache */
	group.group.buf_cache_size = 4;
	group.group.buf_cache_count = 4;
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * 4;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	/* part 2: now that we have used the buffers from the cache, try again. We should get mempool buffers. */
	reset_nvmf_rdma_request(&rdma_req);
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(group.group.buf_cache_count == 0);
	}

	/* part 3: half and half */
	group.group.buf_cache_count = 2;

	for (i = 0; i < 2; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}
	reset_nvmf_rdma_request(&rdma_req);
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	for (i = 0; i < 2; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}
	for (i = 2; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	reset_nvmf_rdma_request(&rdma_req);
}

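/*
 * Helpers for the request-processing tests below: create_recv() builds a
 * minimal incoming command (opcode plus a single keyed SGL) and
 * create_req() wraps it in a request in the NEW state, approximating what
 * the poller would hand to nvmf_rdma_request_process().
 */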
static struct spdk_nvmf_rdma_recv *
create_recv(struct spdk_nvmf_rdma_qpair *rqpair, enum spdk_nvme_nvm_opcode opc)
{
	struct spdk_nvmf_rdma_recv *rdma_recv;
	union nvmf_h2c_msg *cmd;
	struct spdk_nvme_sgl_descriptor *sgl;

	rdma_recv = calloc(1, sizeof(*rdma_recv));
	rdma_recv->qpair = rqpair;
	cmd = calloc(1, sizeof(*cmd));
	rdma_recv->sgl[0].addr = (uintptr_t)cmd;
	cmd->nvme_cmd.opc = opc;
	sgl = &cmd->nvme_cmd.dptr.sgl1;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->keyed.length = 1;

	return rdma_recv;
}

static void
free_recv(struct spdk_nvmf_rdma_recv *rdma_recv)
{
	free((void *)rdma_recv->sgl[0].addr);
	free(rdma_recv);
}

static struct spdk_nvmf_rdma_request *
create_req(struct spdk_nvmf_rdma_qpair *rqpair,
	   struct spdk_nvmf_rdma_recv *rdma_recv)
{
	struct spdk_nvmf_rdma_request *rdma_req;
	union nvmf_c2h_msg *cpl;

	rdma_req = calloc(1, sizeof(*rdma_req));
	rdma_req->recv = rdma_recv;
	rdma_req->req.qpair = &rqpair->qpair;
	rdma_req->state = RDMA_REQUEST_STATE_NEW;
	rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
	rdma_req->data.wr.sg_list = rdma_req->data.sgl;
	cpl = calloc(1, sizeof(*cpl));
	rdma_req->rsp.sgl[0].addr = (uintptr_t)cpl;
	rdma_req->req.rsp = cpl;

	return rdma_req;
}

static void
free_req(struct spdk_nvmf_rdma_request *rdma_req)
{
	free((void *)rdma_req->rsp.sgl[0].addr);
	free(rdma_req);
}

static void
qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
	    struct spdk_nvmf_rdma_poller *poller,
	    struct spdk_nvmf_rdma_device *device,
	    struct spdk_nvmf_rdma_resources *resources,
	    struct spdk_nvmf_transport *transport)
{
	memset(rqpair, 0, sizeof(*rqpair));
	STAILQ_INIT(&rqpair->pending_rdma_write_queue);
	STAILQ_INIT(&rqpair->pending_rdma_read_queue);
	rqpair->poller = poller;
	rqpair->device = device;
	rqpair->resources = resources;
	rqpair->qpair.qid = 1;
	rqpair->ibv_state = IBV_QPS_RTS;
	rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	rqpair->max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
	rqpair->max_send_depth = 16;
	rqpair->max_read_depth = 16;
	rqpair->qpair.transport = transport;
}

static void
poller_reset(struct spdk_nvmf_rdma_poller *poller,
	     struct spdk_nvmf_rdma_poll_group *group)
{
	memset(poller, 0, sizeof(*poller));
	STAILQ_INIT(&poller->qpairs_pending_recv);
	STAILQ_INIT(&poller->qpairs_pending_send);
	poller->group = group;
}

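/*
 * Drives requests through the nvmf_rdma_request_process() state machine.
 * Transitions that would normally be triggered by ibv completions
 * (finishing an RDMA transfer, completing a send) are emulated by setting
 * rdma_req->state by hand before the next call.
 */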
static void
test_spdk_nvmf_rdma_request_process(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_rdma_poll_group group = {};
	struct spdk_nvmf_rdma_poller poller = {};
	struct spdk_nvmf_rdma_device device = {};
	struct spdk_nvmf_rdma_resources resources = {};
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_rdma_recv *rdma_recv;
	struct spdk_nvmf_rdma_request *rdma_req;
	bool progress;

	STAILQ_INIT(&group.group.buf_cache);
	STAILQ_INIT(&group.group.pending_buf_queue);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.transport.data_buf_pool = spdk_mempool_create("test_data_pool", 16, 128, 0, 0);
	rtransport.data_wr_pool = spdk_mempool_create("test_wr_pool", 128,
				  sizeof(struct spdk_nvmf_rdma_request_data),
				  0, 0);
	MOCK_CLEAR(spdk_mempool_get);

	device.attr.device_cap_flags = 0;
	device.map = (void *)0x0;

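	/* A READ is a controller-to-host transfer: data moves after the
	 * command executes, so the request goes straight from NEW to
	 * EXECUTING, and EXECUTED leads to TRANSFERRING_C2H. */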
	/* Test 1: single SGL READ request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_READ);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> EXECUTING */
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
	/* EXECUTED -> TRANSFERRING_C2H */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(rdma_req->recv == NULL);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

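	/* A WRITE is a host-to-controller transfer: the RDMA READ of the
	 * payload happens first (TRANSFERRING_H2C), and execution starts
	 * only once that transfer completes (READY_TO_EXECUTE). */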
	/* Test 2: single SGL WRITE request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> TRANSFERRING_H2C */
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
	STAILQ_INIT(&poller.qpairs_pending_send);
	/* READY_TO_EXECUTE -> EXECUTING */
	rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	/* EXECUTED -> COMPLETING */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
	CU_ASSERT(rdma_req->recv == NULL);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

	/* Test 3: WRITE+WRITE ibv_send batching */
	{
		struct spdk_nvmf_rdma_recv *recv1, *recv2;
		struct spdk_nvmf_rdma_request *req1, *req2;
		recv1 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req1 = create_req(&rqpair, recv1);
		recv2 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req2 = create_req(&rqpair, recv2);

		/* WRITE 1: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 1;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);

		/* WRITE 2: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 2;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);

		STAILQ_INIT(&poller.qpairs_pending_send);

		/* WRITE 1 completes before WRITE 2 has finished RDMA reading */
		/* WRITE 1: READY_TO_EXECUTE -> EXECUTING */
		req1->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 1: EXECUTED -> COMPLETING */
		req1->state = RDMA_REQUEST_STATE_EXECUTED;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 1: COMPLETED -> FREE */
		req1->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_FREE);

		/* Now WRITE 2 has finished reading and completes */
		/* WRITE 2: READY_TO_EXECUTE -> EXECUTING */
		req2->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 2: EXECUTED -> COMPLETING */
		req2->state = RDMA_REQUEST_STATE_EXECUTED;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 2: COMPLETED -> FREE */
		req2->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_FREE);

		free_recv(recv1);
		free_req(req1);
		free_recv(recv2);
		free_req(req2);
		poller_reset(&poller, &group);
		qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
	}

	/* Test 4: invalid command, check xfer type */
	{
		struct spdk_nvmf_rdma_recv *rdma_recv_inv;
		struct spdk_nvmf_rdma_request *rdma_req_inv;
		/* construct an opcode that specifies BIDIRECTIONAL transfer */
		uint8_t opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;

		rdma_recv_inv = create_recv(&rqpair, opc);
		rdma_req_inv = create_req(&rqpair, rdma_recv_inv);

		/* NEW -> RDMA_REQUEST_STATE_COMPLETING */
		rqpair.current_recv_depth = 1;
		progress = nvmf_rdma_request_process(&rtransport, rdma_req_inv);
		CU_ASSERT(progress == true);
		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_COMPLETING);
		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);

		/* RDMA_REQUEST_STATE_COMPLETED -> FREE */
		rdma_req_inv->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, rdma_req_inv);
		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_FREE);

		free_recv(rdma_recv_inv);
		free_req(rdma_req_inv);
		poller_reset(&poller, &group);
		qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
	}

	spdk_mempool_free(rtransport.transport.data_buf_pool);
	spdk_mempool_free(rtransport.data_wr_pool);
}

#define TEST_GROUPS_COUNT 5
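/*
 * Connection scheduling is round-robin per queue type: admin queue pairs
 * (qid 0) advance conn_sched.next_admin_pg while I/O queue pairs advance
 * conn_sched.next_io_pg, so the two kinds of connections are spread over
 * the poll groups independently.
 */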
static void
test_nvmf_rdma_get_optimal_poll_group(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_transport *transport = &rtransport.transport;
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_transport_poll_group *result;
	uint32_t i;

	rqpair.qpair.transport = transport;
	pthread_mutex_init(&rtransport.lock, NULL);
	TAILQ_INIT(&rtransport.poll_groups);

	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		groups[i] = nvmf_rdma_poll_group_create(transport);
		CU_ASSERT(groups[i] != NULL);
		rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
		groups[i]->transport = transport;
	}
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[0]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[0]);

	/* Emulate connection of TEST_GROUPS_COUNT initiators - each creates 1 admin and 1 io qp */
	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		rqpair.qpair.qid = 0;
		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i]);

		rqpair.qpair.qid = 1;
		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
	}
	/* The schedulers have wrapped around, so the admin/io pg pointers are back at the first pg.
	   Destroy all poll groups except the last one */
	for (i = 0; i < TEST_GROUPS_COUNT - 1; i++) {
		nvmf_rdma_poll_group_destroy(groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[i + 1]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i + 1]);
	}

	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Check that pointers to the next admin/io poll groups are not changed */
	rqpair.qpair.qid = 0;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	rqpair.qpair.qid = 1;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Remove the last poll group, check that pointers are NULL */
	nvmf_rdma_poll_group_destroy(groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == NULL);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == NULL);

	/* Request optimal poll group, result must be NULL */
	rqpair.qpair.qid = 0;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	rqpair.qpair.qid = 1;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	pthread_mutex_destroy(&rtransport.lock);
}
#undef TEST_GROUPS_COUNT

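/*
 * Same SGL parsing paths as above, but with DIF insert/strip enabled:
 * every 512-byte block of data is followed by 8 bytes of metadata in the
 * local buffers (elba_length > length), so a contiguous remote range can
 * split into many local SGEs and spill across io_unit_size boundaries.
 */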
static void
test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	char data_buffer[8192];
	struct spdk_nvmf_rdma_request_data *data = (struct spdk_nvmf_rdma_request_data *)data_buffer;
	char data2_buffer[8192];
	struct spdk_nvmf_rdma_request_data *data2 = (struct spdk_nvmf_rdma_request_data *)data2_buffer;
	const uint32_t data_bs = 512;
	const uint32_t md_size = 8;
	int rc, i;
	void *aligned_buffer;

	data->wr.sg_list = data->sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	device.map = NULL;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block, subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size, block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 8;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}

	/* Part 2: simple I/O, one SGL equal to io unit size, io_unit_size is not aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 5);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == RDMA_UT_LKEY);

	/* 2nd buffer consumed: three 520-byte (data + md) blocks plus 488 data
	   bytes fill the first 2048-byte buffer, so the last 24 data bytes of
	   the 4th block land at the start of the second buffer */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == RDMA_UT_LKEY);

	/* Part 3: simple I/O, one SGL equal to io unit size, io_unit_size equal to the 512-byte block size */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs;
	sgl->keyed.length = data_bs;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == data_bs + md_size);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);

	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[0].iov_len == data_bs);
	/* 2nd buffer consumed for metadata */
	CU_ASSERT(rdma_req.req.iov[1].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[1].iov_len == md_size);

	/* Part 4: simple I/O, one SGL equal to io unit size, io_unit_size is aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}

	/* Part 5: simple I/O, one SGL equal to 2x io unit size, io_unit_size is aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 2;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}
	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].length == data_bs);
	}

	/* Part 6: simple I/O, one SGL larger than the transport io unit size, io_unit_size is not aligned
	   to md_size, block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 6;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 6);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 6);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 7);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == RDMA_UT_LKEY);

	/* 2nd IO buffer consumed */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == RDMA_UT_LKEY);

	CU_ASSERT(rdma_req.data.wr.sg_list[5].addr == 0x2000 + 24 + md_size);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].lkey == RDMA_UT_LKEY);

	CU_ASSERT(rdma_req.data.wr.sg_list[6].addr == 0x2000 + 24 + 512 + md_size * 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].lkey == RDMA_UT_LKEY);

	/* Part 7: simple I/O, number of SGL entries exceeds the number of entries
	   one WR can hold. Additional WR is chained */
	MOCK_SET(spdk_mempool_get, data2_buffer);
	aligned_buffer = (void *)((uintptr_t)(data2_buffer + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 16;
	sgl->keyed.length = data_bs * 16;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 16);
	CU_ASSERT(rdma_req.req.data == aligned_buffer);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	/* additional wr from pool */
	CU_ASSERT(rdma_req.data.wr.next == (void *)&data2->wr);
	CU_ASSERT(rdma_req.data.wr.next->num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.next->next == &rdma_req.rsp.wr);

	/* Part 8: simple I/O, data with metadata does not fit into one I/O buffer */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = 516;
	sgl->keyed.length = data_bs * 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 2);
	CU_ASSERT(rdma_req.req.iovcnt == 3);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 2);
	CU_ASSERT(rdma_req.req.data == (void *)0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);

	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);

	/* 2nd IO buffer consumed, data is offset by 4 bytes because the tail
	   of the first block's metadata sits at the beginning of that buffer */
	CU_ASSERT(rdma_req.data.wr.sg_list[1].addr == 0x2000 + 4);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].lkey == RDMA_UT_LKEY);

	/* Test 2: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, data_buffer);
	aligned_buffer = (void *)((uintptr_t)(data_buffer + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);

	/* part 1: 2 segments, each with 1 wr. io_unit_size is aligned with data_bs + md_size */
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1,
			  SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK, 0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);

	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = data_bs * 4;
		sgl_desc[i].address = 0x4000 + i * data_bs * 4;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4 * 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4 * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
			  (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data->wr);
	CU_ASSERT(data->wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data->wr.wr.rdma.remote_addr == 0x4000 + data_bs * 4);
	CU_ASSERT(data->wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(data->wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
			  (data_bs + md_size));
		CU_ASSERT(data->wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(data->wr.next == &rdma_req.rsp.wr);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl);
	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_process);
	CU_ADD_TEST(suite, test_nvmf_rdma_get_optimal_poll_group);
	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl_with_md);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}