/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "nvmf/rdma.c"

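/* State consumed by the spdk_mem_map_translate() mock below. */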
uint64_t g_mr_size;
struct ibv_mr g_rdma_mr;

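/* Number of io_unit_size buffers that make up max_io_size in the test transport options. */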
#define RDMA_UT_UNITS_IN_MAX_IO 16

struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
	.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
	.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
	.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
	.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
	.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
	.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
	.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
};

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size, uint64_t translation), 0);
DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size), 0);
DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
		nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name, const char *short_name,
		uint16_t tpoint_id, uint8_t owner_type, uint8_t object_type, uint8_t new_object,
		uint8_t arg1_is_ptr, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));

DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(spdk_nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));

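/*
 * Minimal memory map translation mock: every translation resolves to g_rdma_mr,
 * and a nonzero g_mr_size overrides the size reported back to the caller.
 */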
uint64_t
spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
{
	if (g_mr_size != 0) {
		*(uint32_t *)size = g_mr_size;
	}

	return (uint64_t)&g_rdma_mr;
}

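/*
 * Clear the fields that spdk_nvmf_rdma_request_parse_sgl() fills in so each
 * test part starts from a clean request.
 */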
static void
reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
{
	int i;

	rdma_req->req.length = 0;
	rdma_req->data_from_pool = false;
	rdma_req->req.data = NULL;
	rdma_req->data.wr.num_sge = 0;
	rdma_req->data.wr.wr.rdma.remote_addr = 0;
	rdma_req->data.wr.wr.rdma.rkey = 0;

	for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
		rdma_req->req.iov[i].iov_base = 0;
		rdma_req->req.iov[i].iov_len = 0;
		rdma_req->buffers[i] = 0;
		rdma_req->data.wr.sg_list[i].addr = 0;
		rdma_req->data.wr.sg_list[i].length = 0;
		rdma_req->data.wr.sg_list[i].lkey = 0;
	}
}

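/*
 * Walk spdk_nvmf_rdma_request_parse_sgl() through the SGL layouts the RDMA
 * transport accepts: keyed data blocks (host memory reached with RDMA work
 * requests), in-capsule data blocks, multi-descriptor last segment SGLs, and
 * the poll group buffer cache path.
 */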
static void
test_spdk_nvmf_rdma_request_parse_sgl(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req;
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvmf_transport_pg_cache_buf bufs[4];
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	int rc, i;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
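	/* The pools themselves are never dereferenced; spdk_mempool_get is mocked for every allocation in this test. */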
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size */
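	/* Hand back 0x2000 for every pooled data buffer. */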
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.buffers[0] == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	/* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
		CU_ASSERT((uint64_t)rdma_req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 3: simple I/O, one SGL larger than the transport max io size */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 4: Pretend there are no buffer pools */
	MOCK_SET(spdk_mempool_get, NULL);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.data_from_pool == false);
	CU_ASSERT(rdma_req.req.data == NULL);
	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
	CU_ASSERT(rdma_req.buffers[0] == NULL);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);

	rdma_req.recv->buf = (void *)0xDDDD;
	/* Test 2: sgl type: data block subtype: offset (in capsule data) */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
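	/*
	 * In-capsule payloads are served straight out of the receive buffer, so with
	 * a zero offset req.data is expected to equal recv->buf.
	 */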

	/* Part 1: Normal I/O no larger than the in capsule data size, no offset */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
	CU_ASSERT(rdma_req.data_from_pool == false);

	/* Part 2: I/O offset + length too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = rtransport.transport.opts.in_capsule_data_size;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 3: I/O too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Test 3: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
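	/*
	 * The receive buffer now holds the list of keyed SGL descriptors, and
	 * spdk_mempool_get is mocked to hand back the local request_data struct, so
	 * the work request for the second descriptor is expected to chain through
	 * data.wr.
	 */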
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, &data);

	/* part 1: 2 segments each with 1 wr. */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
		sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 2: 2 segments, each with 1 wr containing 8 sge_elements */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
		sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 16);
	CU_ASSERT(rdma_req.data.wr.num_sge == 8);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
	CU_ASSERT(data.wr.num_sge == 8);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 3: 2 segments, one very large, one very small */
	reset_nvmf_rdma_request(&rdma_req);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.key = 0x44;
	}

	sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
				   rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[0].address = 0x4000;
	sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
			      rtransport.transport.opts.io_unit_size / 2;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 17);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	for (i = 0; i < 15; i++) {
		CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
	}
	CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
		  rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* Test 4: use PG buffer cache */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;

	for (i = 0; i < 4; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}

	/* part 1: use the four buffers from the pg cache */

	group.group.buf_cache_size = 4;
	group.group.buf_cache_count = 4;
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * 4;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

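	/*
	 * Buffers taken from the poll group cache are aligned up to the data buffer
	 * boundary, hence the NVMF_DATA_BUFFER_MASK math in the assertions below.
	 */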
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}
	/* part 2: now that we have used the buffers from the cache, try again. We should get mempool buffers. */

	reset_nvmf_rdma_request(&rdma_req);
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(group.group.buf_cache_count == 0);
	}

	/* part 3: half and half */
	group.group.buf_cache_count = 2;

	for (i = 0; i < 2; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}
	reset_nvmf_rdma_request(&rdma_req);
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	for (i = 0; i < 2; i++) {
		CU_ASSERT((uint64_t)rdma_req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}
	for (i = 2; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}
}

int main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvmf", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (CU_add_test(suite, "test_parse_sgl", test_spdk_nvmf_rdma_request_parse_sgl) == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
458 }
459