xref: /spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c (revision 8dd1cd2104ea4001e4a0da2a4851ccd62c82f8e8)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation. All rights reserved.
3  *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 #include "spdk_cunit.h"
8 #include "nvme/nvme_rdma.c"
9 #include "common/lib/nvme/common_stubs.h"
10 #include "common/lib/test_rdma.c"
11 
12 SPDK_LOG_REGISTER_COMPONENT(nvme)
13 
14 DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
15 		uint64_t size, uint64_t translation), 0);
16 DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
17 		uint64_t size), 0);
18 
19 DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
20 		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
21 DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));
22 
23 DEFINE_STUB(nvme_poll_group_connect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
24 
25 DEFINE_STUB_V(nvme_qpair_resubmit_requests, (struct spdk_nvme_qpair *qpair, uint32_t num_requests));
26 DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group,
27 		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);
28 
29 DEFINE_STUB(rdma_ack_cm_event, int, (struct rdma_cm_event *event), 0);
30 DEFINE_STUB_V(rdma_free_devices, (struct ibv_context **list));
31 DEFINE_STUB(fcntl, int, (int fd, int cmd, ...), 0);
32 DEFINE_STUB_V(rdma_destroy_event_channel, (struct rdma_event_channel *channel));
33 
34 DEFINE_STUB(ibv_dereg_mr, int, (struct ibv_mr *mr), 0);
35 DEFINE_STUB(ibv_resize_cq, int, (struct ibv_cq *cq, int cqe), 0);
36 
37 DEFINE_STUB(spdk_memory_domain_get_context, struct spdk_memory_domain_ctx *,
38 	    (struct spdk_memory_domain *device), NULL);
39 DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
40 	    (struct spdk_memory_domain *device), SPDK_DMA_DEVICE_TYPE_RDMA);
41 DEFINE_STUB_V(spdk_memory_domain_destroy, (struct spdk_memory_domain *device));
42 DEFINE_STUB(spdk_memory_domain_pull_data, int, (struct spdk_memory_domain *src_domain,
43 		void *src_domain_ctx, struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov,
44 		uint32_t dst_iov_cnt, spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg), 0);
45 
46 DEFINE_RETURN_MOCK(spdk_memory_domain_create, int);
47 int
48 spdk_memory_domain_create(struct spdk_memory_domain **domain, enum spdk_dma_device_type type,
49 			  struct spdk_memory_domain_ctx *ctx, const char *id)
50 {
51 	static struct spdk_memory_domain *__dma_dev = (struct spdk_memory_domain *)0xdeaddead;
52 
53 	HANDLE_RETURN_MOCK(spdk_memory_domain_create);
54 
55 	*domain = __dma_dev;
56 
57 	return 0;
58 }
59 
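/* Canned result handed back by the spdk_memory_domain_translate_data() stub below;
 * tests overwrite its iov and rdma key fields before exercising a translation. */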
60 static struct spdk_memory_domain_translation_result g_memory_translation_translation = {.size = sizeof(struct spdk_memory_domain_translation_result) };
61 
62 DEFINE_RETURN_MOCK(spdk_memory_domain_translate_data, int);
63 int
64 spdk_memory_domain_translate_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
65 				  struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
66 				  void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
67 {
68 
69 	HANDLE_RETURN_MOCK(spdk_memory_domain_translate_data);
70 
71 	memcpy(result, &g_memory_translation_translation, sizeof(g_memory_translation_translation));
72 
73 	return 0;
74 }
75 
76 /* ibv_reg_mr can be a macro, need to undefine it */
77 #ifdef ibv_reg_mr
78 #undef ibv_reg_mr
79 #endif
80 
81 DEFINE_RETURN_MOCK(ibv_reg_mr, struct ibv_mr *);
82 struct ibv_mr *
83 ibv_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
84 {
85 	HANDLE_RETURN_MOCK(ibv_reg_mr);
86 	if (length > 0) {
87 		return &g_rdma_mr;
88 	} else {
89 		return NULL;
90 	}
91 }
92 
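/* Minimal stand-in for a bdev I/O payload: a fixed iovec array plus a cursor (iovpos)
 * and count (iovcnt), passed as cb_arg to the reset_sgl/next_sge callbacks below. */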
93 struct nvme_rdma_ut_bdev_io {
94 	struct iovec iovs[NVME_RDMA_MAX_SGL_DESCRIPTORS];
95 	int iovpos;
96 	int iovcnt;
97 };
98 
99 DEFINE_RETURN_MOCK(rdma_get_devices, struct ibv_context **);
100 struct ibv_context **
101 rdma_get_devices(int *num_devices)
102 {
103 	static struct ibv_context *_contexts[] = {
104 		(struct ibv_context *)0xDEADBEEF,
105 		(struct ibv_context *)0xFEEDBEEF,
106 		NULL
107 	};
108 
109 	HANDLE_RETURN_MOCK(rdma_get_devices);
110 	return _contexts;
111 }
112 
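/* Returns NULL (failure) by default; tests that need an event channel override
 * this with MOCK_SET(rdma_create_event_channel, ...). */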
113 DEFINE_RETURN_MOCK(rdma_create_event_channel, struct rdma_event_channel *);
114 struct rdma_event_channel *
115 rdma_create_event_channel(void)
116 {
117 	HANDLE_RETURN_MOCK(rdma_create_event_channel);
118 	return NULL;
119 }
120 
121 DEFINE_RETURN_MOCK(ibv_query_device, int);
122 int
123 ibv_query_device(struct ibv_context *context,
124 		 struct ibv_device_attr *device_attr)
125 {
126 	if (device_attr) {
127 		device_attr->max_sge = NVME_RDMA_MAX_SGL_DESCRIPTORS;
128 	}
129 	HANDLE_RETURN_MOCK(ibv_query_device);
130 
131 	return 0;
132 }
133 
134 /* essentially a simplification of bdev_nvme_next_sge and bdev_nvme_reset_sgl */
135 static void
136 nvme_rdma_ut_reset_sgl(void *cb_arg, uint32_t offset)
137 {
138 	struct nvme_rdma_ut_bdev_io *bio = cb_arg;
139 	struct iovec *iov;
140 
141 	for (bio->iovpos = 0; bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
142 		iov = &bio->iovs[bio->iovpos];
143 		/* Only provide offsets at the beginning of an iov */
144 		if (offset == 0) {
145 			break;
146 		}
147 
148 		offset -= iov->iov_len;
149 	}
150 
151 	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
152 }
153 
154 static int
155 nvme_rdma_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
156 {
157 	struct nvme_rdma_ut_bdev_io *bio = cb_arg;
158 	struct iovec *iov;
159 
160 	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
161 
162 	if (bio->iovpos == bio->iovcnt) {
163 		return -1;
164 	}
165 
166 	iov = &bio->iovs[bio->iovpos];
167 
168 	*address = iov->iov_base;
169 	*length = iov->iov_len;
170 	bio->iovpos++;
171 
172 	return 0;
173 }
174 
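/* Exercises nvme_rdma_build_sgl_request(): single and multiple SGEs, an MR smaller
 * than an SGE, too few iovecs for the payload, a length that does not fit in the
 * 3-byte SGL length field, and SGL descriptors that exceed the in-capsule data size. */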
175 static void
176 test_nvme_rdma_build_sgl_request(void)
177 {
178 	struct nvme_rdma_qpair rqpair;
179 	struct spdk_nvme_ctrlr ctrlr = {0};
180 	struct spdk_nvmf_cmd cmd = {{0}};
181 	struct spdk_nvme_rdma_req rdma_req = {0};
182 	struct nvme_request req = {{0}};
183 	struct nvme_rdma_ut_bdev_io bio = { .iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS };
184 	uint64_t i;
185 	int rc;
186 
187 	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
188 	ctrlr.cdata.nvmf_specific.msdbd = 16;
189 	ctrlr.ioccsz_bytes = 4096;
190 
191 	rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
192 	rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
193 	rqpair.qpair.ctrlr = &ctrlr;
194 	rqpair.cmds = &cmd;
195 	cmd.sgl[0].address = 0x1111;
196 	rdma_req.id = 0;
197 	rdma_req.req = &req;
198 
199 	req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
200 	req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
201 	req.payload.contig_or_cb_arg = &bio;
202 	req.qpair = &rqpair.qpair;
203 
204 	for (i = 0; i < NVME_RDMA_MAX_SGL_DESCRIPTORS; i++) {
205 		bio.iovs[i].iov_base = (void *)(i + 1);
206 		bio.iovs[i].iov_len = 0;
207 	}
208 
209 	/* Test case 1: single SGL. Expected: PASS */
210 	bio.iovpos = 0;
211 	req.payload_offset = 0;
212 	req.payload_size = 0x1000;
213 	bio.iovs[0].iov_len = 0x1000;
214 	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
215 	SPDK_CU_ASSERT_FATAL(rc == 0);
216 	CU_ASSERT(bio.iovpos == 1);
217 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
218 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
219 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
220 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
221 	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)bio.iovs[0].iov_base);
222 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
223 
224 	/* Test case 2: multiple SGL. Expected: PASS */
225 	bio.iovpos = 0;
226 	req.payload_offset = 0;
227 	req.payload_size = 0x4000;
228 	for (i = 0; i < 4; i++) {
229 		bio.iovs[i].iov_len = 0x1000;
230 	}
231 	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
232 	SPDK_CU_ASSERT_FATAL(rc == 0);
233 	CU_ASSERT(bio.iovpos == 4);
234 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
235 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
236 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 4 * sizeof(struct spdk_nvme_sgl_descriptor));
237 	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)0);
238 	CU_ASSERT(rdma_req.send_sgl[0].length == 4 * sizeof(struct spdk_nvme_sgl_descriptor) + sizeof(
239 			  struct spdk_nvme_cmd));
240 	for (i = 0; i < 4; i++) {
241 		CU_ASSERT(cmd.sgl[i].keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
242 		CU_ASSERT(cmd.sgl[i].keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
243 		CU_ASSERT(cmd.sgl[i].keyed.length == bio.iovs[i].iov_len);
244 		CU_ASSERT(cmd.sgl[i].keyed.key == RDMA_UT_RKEY);
245 		CU_ASSERT(cmd.sgl[i].address == (uint64_t)bio.iovs[i].iov_base);
246 	}
247 
248 	/* Test case 3: Multiple SGL, each SGE is twice the MR size. Expected: FAIL */
249 	bio.iovpos = 0;
250 	req.payload_offset = 0;
251 	g_mr_size = 0x800;
252 	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
253 	SPDK_CU_ASSERT_FATAL(rc != 0);
254 	CU_ASSERT(bio.iovpos == 1);
255 
256 	/* Test case 4: Multiple SGL, SGL size smaller than I/O size. Expected: FAIL */
257 	bio.iovpos = 0;
258 	bio.iovcnt = 4;
259 	req.payload_offset = 0;
260 	req.payload_size = 0x6000;
261 	g_mr_size = 0x0;
262 	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
263 	SPDK_CU_ASSERT_FATAL(rc != 0);
264 	CU_ASSERT(bio.iovpos == bio.iovcnt);
265 	bio.iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS;
266 
267 	/* Test case 5: SGL length exceeds 3 bytes. Expected: FAIL */
268 	req.payload_size = 0x1000 + (1 << 24);
269 	bio.iovs[0].iov_len = 0x1000;
270 	bio.iovs[1].iov_len = 1 << 24;
271 	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
272 	SPDK_CU_ASSERT_FATAL(rc != 0);
273 
274 	/* Test case 6: 4 SGL descriptors, size of SGL descriptors exceeds ICD. Expected: FAIL */
275 	ctrlr.ioccsz_bytes = 60;
276 	bio.iovpos = 0;
277 	req.payload_offset = 0;
278 	req.payload_size = 0x4000;
279 	for (i = 0; i < 4; i++) {
280 		bio.iovs[i].iov_len = 0x1000;
281 	}
282 	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
283 	SPDK_CU_ASSERT_FATAL(rc == -1);
284 }
285 
286 static void
287 test_nvme_rdma_build_sgl_inline_request(void)
288 {
289 	struct nvme_rdma_qpair rqpair;
290 	struct spdk_nvme_ctrlr ctrlr = {0};
291 	struct spdk_nvmf_cmd cmd = {{0}};
292 	struct spdk_nvme_rdma_req rdma_req = {0};
293 	struct nvme_request req = {{0}};
294 	struct nvme_rdma_ut_bdev_io bio = { .iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS };
295 	int rc;
296 
297 	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
298 	ctrlr.cdata.nvmf_specific.msdbd = 16;
299 
300 	rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
301 	rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
302 	rqpair.qpair.ctrlr = &ctrlr;
303 	rqpair.cmds = &cmd;
304 	cmd.sgl[0].address = 0x1111;
305 	rdma_req.id = 0;
306 	rdma_req.req = &req;
307 
308 	req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
309 	req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
310 	req.payload.contig_or_cb_arg = &bio;
311 	req.qpair = &rqpair.qpair;
312 
313 	/* Test case 1: single inline SGL. Expected: PASS */
314 	bio.iovpos = 0;
315 	req.payload_offset = 0;
316 	req.payload_size = 0x1000;
317 	bio.iovs[0].iov_base = (void *)0xdeadbeef;
318 	bio.iovs[0].iov_len = 0x1000;
319 	rc = nvme_rdma_build_sgl_inline_request(&rqpair, &rdma_req);
320 	SPDK_CU_ASSERT_FATAL(rc == 0);
321 	CU_ASSERT(bio.iovpos == 1);
322 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
323 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
324 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
325 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
326 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
327 	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
328 	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
329 	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
330 
331 	/* Test case 2: SGL length exceeds 3 bytes. Expected: PASS */
332 	bio.iovpos = 0;
333 	req.payload_offset = 0;
334 	req.payload_size = 1 << 24;
335 	bio.iovs[0].iov_len = 1 << 24;
336 	rc = nvme_rdma_build_sgl_inline_request(&rqpair, &rdma_req);
337 	SPDK_CU_ASSERT_FATAL(rc == 0);
338 	CU_ASSERT(bio.iovpos == 1);
339 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
340 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
341 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
342 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
343 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
344 	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
345 	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
346 	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
347 }
348 
349 static void
350 test_nvme_rdma_build_contig_request(void)
351 {
352 	struct nvme_rdma_qpair rqpair;
353 	struct spdk_nvme_ctrlr ctrlr = {0};
354 	struct spdk_nvmf_cmd cmd = {{0}};
355 	struct spdk_nvme_rdma_req rdma_req = {0};
356 	struct nvme_request req = {{0}};
357 	int rc;
358 
359 	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
360 	ctrlr.cdata.nvmf_specific.msdbd = 16;
361 
362 	rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
363 	rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
364 	rqpair.qpair.ctrlr = &ctrlr;
365 	rqpair.cmds = &cmd;
366 	cmd.sgl[0].address = 0x1111;
367 	rdma_req.id = 0;
368 	rdma_req.req = &req;
369 
370 	req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
371 	req.qpair = &rqpair.qpair;
372 
373 	/* Test case 1: contig request. Expected: PASS */
374 	req.payload_offset = 0;
375 	req.payload_size = 0x1000;
376 	rc = nvme_rdma_build_contig_request(&rqpair, &rdma_req);
377 	SPDK_CU_ASSERT_FATAL(rc == 0);
378 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
379 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
380 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
381 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
382 	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)req.payload.contig_or_cb_arg);
383 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
384 
385 	/* Test case 2: SGL length exceeds 3 bytes. Expected: FAIL */
386 	req.payload_offset = 0;
387 	req.payload_size = 1 << 24;
388 	rc = nvme_rdma_build_contig_request(&rqpair, &rdma_req);
389 	SPDK_CU_ASSERT_FATAL(rc != 0);
390 }
391 
392 static void
393 test_nvme_rdma_build_contig_inline_request(void)
394 {
395 	struct nvme_rdma_qpair rqpair;
396 	struct spdk_nvme_ctrlr ctrlr = {0};
397 	struct spdk_nvmf_cmd cmd = {{0}};
398 	struct spdk_nvme_rdma_req rdma_req = {0};
399 	struct nvme_request req = {{0}};
400 	int rc;
401 
402 	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
403 	ctrlr.cdata.nvmf_specific.msdbd = 16;
404 
405 	rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
406 	rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
407 	rqpair.qpair.ctrlr = &ctrlr;
408 	rqpair.cmds = &cmd;
409 	cmd.sgl[0].address = 0x1111;
410 	rdma_req.id = 0;
411 	rdma_req.req = &req;
412 
413 	req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
414 	req.qpair = &rqpair.qpair;
415 
416 	/* Test case 1: single inline SGL. Expected: PASS */
417 	req.payload_offset = 0;
418 	req.payload_size = 0x1000;
419 	rc = nvme_rdma_build_contig_inline_request(&rqpair, &rdma_req);
420 	SPDK_CU_ASSERT_FATAL(rc == 0);
421 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
422 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
423 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
424 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
425 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
426 	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
427 	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
428 	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
429 
430 	/* Test case 2: SGL length exceeds 3 bytes. Expected: PASS */
431 	req.payload_offset = 0;
432 	req.payload_size = 1 << 24;
433 	rc = nvme_rdma_build_contig_inline_request(&rqpair, &rdma_req);
434 	SPDK_CU_ASSERT_FATAL(rc == 0);
435 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
436 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
437 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
438 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
439 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
440 	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
441 	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
442 	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
443 }
444 
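/* nvme_rdma_alloc_reqs() must fail for zero entries and otherwise point each
 * rdma_req's send SGL and send WR at its own command slot and work-request id. */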
445 static void
446 test_nvme_rdma_alloc_reqs(void)
447 {
448 	struct nvme_rdma_qpair rqpair = {};
449 	int rc;
450 
451 	memset(&g_nvme_hooks, 0, sizeof(g_nvme_hooks));
452 
453 	/* Test case 1: zero entries. Expect: FAIL */
454 	rqpair.num_entries = 0;
455 
456 	rc = nvme_rdma_alloc_reqs(&rqpair);
457 	CU_ASSERT(rqpair.rdma_reqs == NULL);
458 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
459 
460 	/* Test case 2: single entry. Expect: PASS */
461 	memset(&rqpair, 0, sizeof(rqpair));
462 	rqpair.num_entries = 1;
463 
464 	rc = nvme_rdma_alloc_reqs(&rqpair);
465 	CU_ASSERT(rc == 0);
466 	CU_ASSERT(rqpair.rdma_reqs[0].send_sgl[0].addr
467 		  == (uint64_t)&rqpair.cmds[0]);
468 	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.wr_id
469 		  == (uint64_t)&rqpair.rdma_reqs[0].rdma_wr);
470 	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.next == NULL);
471 	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.opcode == IBV_WR_SEND);
472 	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.send_flags == IBV_SEND_SIGNALED);
473 	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.sg_list
474 		  == rqpair.rdma_reqs[0].send_sgl);
475 	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.imm_data == 0);
476 	spdk_free(rqpair.rdma_reqs);
477 	spdk_free(rqpair.cmds);
478 
479 	/* Test case 3: multiple entries. Expect: PASS */
480 	memset(&rqpair, 0, sizeof(rqpair));
481 	rqpair.num_entries = 5;
482 
483 	rc = nvme_rdma_alloc_reqs(&rqpair);
484 	CU_ASSERT(rc == 0);
485 	for (int i = 0; i < 5; i++) {
486 		CU_ASSERT(rqpair.rdma_reqs[i].send_sgl[0].addr
487 			  == (uint64_t)&rqpair.cmds[i]);
488 		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.wr_id
489 			  == (uint64_t)&rqpair.rdma_reqs[i].rdma_wr);
490 		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.next == NULL);
491 		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.opcode == IBV_WR_SEND);
492 		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.send_flags
493 			  == IBV_SEND_SIGNALED);
494 		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.sg_list
495 			  == rqpair.rdma_reqs[i].send_sgl);
496 		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.imm_data == 0);
497 	}
498 	spdk_free(rqpair.rdma_reqs);
499 	spdk_free(rqpair.cmds);
500 }
501 
502 static void
503 test_nvme_rdma_alloc_rsps(void)
504 {
505 	struct nvme_rdma_qpair rqpair = {};
506 	int rc;
507 
508 	memset(&g_nvme_hooks, 0, sizeof(g_nvme_hooks));
509 
510 	/* Test case 1: zero entries, calloc fails. Expect: FAIL */
511 	rqpair.num_entries = 0;
512 	rc = nvme_rdma_alloc_rsps(&rqpair);
513 	CU_ASSERT(rqpair.rsp_sgls == NULL);
514 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
515 
516 	/* Test case 2: single entry, calloc succeeds. Expect: PASS */
517 	memset(&rqpair, 0, sizeof(rqpair));
518 	rqpair.num_entries = 1;
519 
520 	rc = nvme_rdma_alloc_rsps(&rqpair);
521 	CU_ASSERT(rc == 0);
522 	CU_ASSERT(rqpair.rsp_sgls != NULL);
523 	CU_ASSERT(rqpair.rsp_recv_wrs != NULL);
524 	CU_ASSERT(rqpair.rsps != NULL);
525 	nvme_rdma_free_rsps(&rqpair);
526 }
527 
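/* Queue sizes 0 and 1 must be rejected; valid sizes yield a qpair with
 * num_entries == qsize - 1 and allocated response resources. */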
528 static void
529 test_nvme_rdma_ctrlr_create_qpair(void)
530 {
531 	struct spdk_nvme_ctrlr ctrlr = {};
532 	uint16_t qid, qsize;
533 	struct spdk_nvme_qpair *qpair;
534 	struct nvme_rdma_qpair *rqpair;
535 
536 	/* Test case 1: max qsize. Expect: PASS */
537 	qsize = 0xffff;
538 	qid = 1;
539 
540 	qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
541 					     SPDK_NVME_QPRIO_URGENT, 1,
542 					     false, false);
543 	CU_ASSERT(qpair != NULL);
544 	rqpair = SPDK_CONTAINEROF(qpair, struct nvme_rdma_qpair, qpair);
545 	CU_ASSERT(qpair == &rqpair->qpair);
546 	CU_ASSERT(rqpair->num_entries == qsize - 1);
547 	CU_ASSERT(rqpair->delay_cmd_submit == false);
548 	CU_ASSERT(rqpair->rsp_sgls != NULL);
549 	CU_ASSERT(rqpair->rsp_recv_wrs != NULL);
550 	CU_ASSERT(rqpair->rsps != NULL);
551 
552 	nvme_rdma_free_reqs(rqpair);
553 	nvme_rdma_free_rsps(rqpair);
554 	nvme_rdma_free(rqpair);
555 	rqpair = NULL;
556 
557 	/* Test case 2: queue size 2. Expect: PASS */
558 	qsize = 2;
559 	qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
560 					     SPDK_NVME_QPRIO_URGENT, 1,
561 					     false, false);
562 	CU_ASSERT(qpair != NULL);
563 	rqpair = SPDK_CONTAINEROF(qpair, struct nvme_rdma_qpair, qpair);
564 	CU_ASSERT(rqpair->num_entries == qsize - 1);
565 	CU_ASSERT(rqpair->rsp_sgls != NULL);
566 	CU_ASSERT(rqpair->rsp_recv_wrs != NULL);
567 	CU_ASSERT(rqpair->rsps != NULL);
568 
569 	nvme_rdma_free_reqs(rqpair);
570 	nvme_rdma_free_rsps(rqpair);
571 	nvme_rdma_free(rqpair);
572 	rqpair = NULL;
573 
574 	/* Test case 3: queue size zero. Expect: FAIL */
575 	qsize = 0;
576 
577 	qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
578 					     SPDK_NVME_QPRIO_URGENT, 1,
579 					     false, false);
580 	SPDK_CU_ASSERT_FATAL(qpair == NULL);
581 
582 	/* Test case 4: queue size 1. Expect: FAIL */
583 	qsize = 1;
584 	qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
585 					     SPDK_NVME_QPRIO_URGENT, 1,
586 					     false, false);
587 	SPDK_CU_ASSERT_FATAL(qpair == NULL);
588 }
589 
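/* CQ stubs for the poller and poll-group tests: ibv_create_cq() returns a fixed
 * fake pointer unless a test overrides it with MOCK_SET(). */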
590 DEFINE_STUB(ibv_create_cq, struct ibv_cq *, (struct ibv_context *context, int cqe, void *cq_context,
591 		struct ibv_comp_channel *channel, int comp_vector), (struct ibv_cq *)0xFEEDBEEF);
592 DEFINE_STUB(ibv_destroy_cq, int, (struct ibv_cq *cq), 0);
593 
594 static void
595 test_nvme_rdma_poller_create(void)
596 {
597 	struct nvme_rdma_poll_group	group = {};
598 	struct ibv_context *contexts = (struct ibv_context *)0xDEADBEEF;
599 
600 	/* Case: poller creation succeeds (calloc and ibv_create_cq do not fail) */
601 	STAILQ_INIT(&group.pollers);
602 	group.num_pollers = 1;
603 	int rc = nvme_rdma_poller_create(&group, contexts);
604 
605 	CU_ASSERT(rc == 0);
606 	CU_ASSERT(group.num_pollers == 2);
607 	CU_ASSERT(&group.pollers != NULL);
608 	CU_ASSERT(group.pollers.stqh_first->device == contexts);
609 	CU_ASSERT(group.pollers.stqh_first->cq == (struct ibv_cq *)0xFEEDBEEF);
610 	CU_ASSERT(group.pollers.stqh_first->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
611 	CU_ASSERT(group.pollers.stqh_first->required_num_wc == 0);
612 
613 	nvme_rdma_poll_group_free_pollers(&group);
614 }
615 
616 static void
617 test_nvme_rdma_qpair_process_cm_event(void)
618 {
619 	struct nvme_rdma_qpair rqpair = {};
620 	struct rdma_cm_event	 event = {};
621 	struct spdk_nvmf_rdma_accept_private_data	accept_data = {};
622 	int rc = 0;
623 
624 	/* case1: event == RDMA_CM_EVENT_ADDR_RESOLVED */
625 	rqpair.evt = &event;
626 	event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
627 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
628 	CU_ASSERT(rc == 0);
629 
630 	/* case2: event == RDMA_CM_EVENT_CONNECT_REQUEST */
631 	rqpair.evt = &event;
632 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
633 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
634 	CU_ASSERT(rc == 0);
635 
636 	/* case3: event == RDMA_CM_EVENT_CONNECT_ERROR */
637 	rqpair.evt = &event;
638 	event.event = RDMA_CM_EVENT_CONNECT_ERROR;
639 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
640 	CU_ASSERT(rc == 0);
641 
642 	/* case4: event == RDMA_CM_EVENT_UNREACHABLE */
643 	rqpair.evt = &event;
644 	event.event = RDMA_CM_EVENT_UNREACHABLE;
645 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
646 	CU_ASSERT(rc == 0);
647 
648 	/* case5: event == RDMA_CM_EVENT_CONNECT_RESPONSE */
649 	rqpair.evt = &event;
650 	event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
651 	event.param.conn.private_data = NULL;
652 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
653 	CU_ASSERT(rc == -1);
654 
655 	rqpair.evt = &event;
656 	event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
657 	event.param.conn.private_data = &accept_data;
658 	accept_data.crqsize = 512;
659 	rqpair.num_entries = 1024;
660 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
661 	CU_ASSERT(rc == 0);
662 	CU_ASSERT(rqpair.num_entries == 1024);
663 
664 	/* case6: event == RDMA_CM_EVENT_DISCONNECTED */
665 	rqpair.evt = &event;
666 	event.event = RDMA_CM_EVENT_DISCONNECTED;
667 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
668 	CU_ASSERT(rc == 0);
669 	CU_ASSERT(rqpair.qpair.transport_failure_reason == SPDK_NVME_QPAIR_FAILURE_REMOTE);
670 
671 	/* case7: event == RDMA_CM_EVENT_DEVICE_REMOVAL */
672 	rqpair.evt = &event;
673 	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
674 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
675 	CU_ASSERT(rc == 0);
676 	CU_ASSERT(rqpair.qpair.transport_failure_reason == SPDK_NVME_QPAIR_FAILURE_LOCAL);
677 
678 	/* case8: event == RDMA_CM_EVENT_MULTICAST_JOIN */
679 	rqpair.evt = &event;
680 	event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
681 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
682 	CU_ASSERT(rc == 0);
683 
684 	/* case9: event == RDMA_CM_EVENT_ADDR_CHANGE */
685 	rqpair.evt = &event;
686 	event.event = RDMA_CM_EVENT_ADDR_CHANGE;
687 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
688 	CU_ASSERT(rc == 0);
689 	CU_ASSERT(rqpair.qpair.transport_failure_reason == SPDK_NVME_QPAIR_FAILURE_LOCAL);
690 
691 	/* case10: event == RDMA_CM_EVENT_TIMEWAIT_EXIT */
692 	rqpair.evt = &event;
693 	event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
694 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
695 	CU_ASSERT(rc == 0);
696 
697 	/* case11: default event == 0xFF */
698 	rqpair.evt = &event;
699 	event.event = 0xFF;
700 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
701 	CU_ASSERT(rc == 0);
702 }
703 
704 static void
705 test_nvme_rdma_mr_get_lkey(void)
706 {
707 	union nvme_rdma_mr mr = {};
708 	struct ibv_mr	ibv_mr = {};
709 	uint64_t mr_key;
710 	uint32_t lkey;
711 
712 	memset(&g_nvme_hooks, 0, sizeof(g_nvme_hooks));
713 	ibv_mr.lkey = 1;
714 	mr_key = 2;
715 
716 	/* Case 1: get key from the key address (g_nvme_hooks.get_rkey is set) */
717 	mr.key = (uint64_t)&mr_key;
718 	g_nvme_hooks.get_rkey = (void *)0xAEADBEEF;
719 
720 	lkey = nvme_rdma_mr_get_lkey(&mr);
721 	CU_ASSERT(lkey == mr_key);
722 
723 	/* Case 2: Get key from ibv_mr  */
724 	g_nvme_hooks.get_rkey = NULL;
725 	mr.mr = &ibv_mr;
726 
727 	lkey = nvme_rdma_mr_get_lkey(&mr);
728 	CU_ASSERT(lkey == ibv_mr.lkey);
729 }
730 
731 static void
732 test_nvme_rdma_ctrlr_construct(void)
733 {
734 	struct spdk_nvme_ctrlr *ctrlr;
735 	struct spdk_nvme_transport_id trid = {};
736 	struct spdk_nvme_ctrlr_opts opts = {};
737 	struct nvme_rdma_qpair *rqpair = NULL;
738 	struct nvme_rdma_ctrlr *rctrlr = NULL;
739 	struct rdma_event_channel cm_channel = {};
740 	void *devhandle = NULL;
741 	int rc;
742 
743 	opts.transport_retry_count = NVME_RDMA_CTRLR_MAX_TRANSPORT_RETRY_COUNT + 1;
744 	opts.transport_ack_timeout = NVME_RDMA_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT + 1;
745 	opts.admin_queue_size = 0xFFFF;
746 	trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
747 	trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
748 	MOCK_SET(rdma_create_event_channel, &cm_channel);
749 
750 	ctrlr = nvme_rdma_ctrlr_construct(&trid, &opts, devhandle);
751 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
752 	CU_ASSERT(ctrlr->opts.transport_retry_count ==
753 		  NVME_RDMA_CTRLR_MAX_TRANSPORT_RETRY_COUNT);
754 	CU_ASSERT(ctrlr->opts.transport_ack_timeout ==
755 		  NVME_RDMA_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT);
756 	CU_ASSERT(ctrlr->opts.admin_queue_size == opts.admin_queue_size);
757 	rctrlr = SPDK_CONTAINEROF(ctrlr, struct nvme_rdma_ctrlr, ctrlr);
758 	CU_ASSERT(rctrlr->max_sge == NVME_RDMA_MAX_SGL_DESCRIPTORS);
759 	CU_ASSERT(rctrlr->cm_channel == &cm_channel);
760 	CU_ASSERT(!strncmp((char *)&rctrlr->ctrlr.trid,
761 			   (char *)&trid, sizeof(trid)));
762 
763 	SPDK_CU_ASSERT_FATAL(ctrlr->adminq != NULL);
764 	rqpair = SPDK_CONTAINEROF(ctrlr->adminq, struct nvme_rdma_qpair, qpair);
765 	CU_ASSERT(rqpair->num_entries == opts.admin_queue_size - 1);
766 	CU_ASSERT(rqpair->delay_cmd_submit == false);
767 	CU_ASSERT(rqpair->rsp_sgls != NULL);
768 	CU_ASSERT(rqpair->rsp_recv_wrs != NULL);
769 	CU_ASSERT(rqpair->rsps != NULL);
770 	MOCK_CLEAR(rdma_create_event_channel);
771 
772 	/* Hardcode the trtype, because nvme_qpair_init() is a stub function. */
773 	rqpair->qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
774 	rc = nvme_rdma_ctrlr_destruct(ctrlr);
775 	CU_ASSERT(rc == 0);
776 }
777 
778 static void
779 test_nvme_rdma_req_put_and_get(void)
780 {
781 	struct nvme_rdma_qpair rqpair = {};
782 	struct spdk_nvme_rdma_req rdma_req = {};
783 	struct spdk_nvme_rdma_req *rdma_req_get;
784 
785 	/* case 1: nvme_rdma_req_put */
786 	TAILQ_INIT(&rqpair.free_reqs);
787 	rdma_req.completion_flags = 1;
788 	rdma_req.req = (struct nvme_request *)0xDEADBEFF;
789 	rdma_req.id = 10086;
790 	nvme_rdma_req_put(&rqpair, &rdma_req);
791 
792 	CU_ASSERT(rqpair.free_reqs.tqh_first == &rdma_req);
793 	CU_ASSERT(rqpair.free_reqs.tqh_first->completion_flags == 0);
794 	CU_ASSERT(rqpair.free_reqs.tqh_first->req == NULL);
795 	CU_ASSERT(rqpair.free_reqs.tqh_first->id == 10086);
796 	CU_ASSERT(rdma_req.completion_flags == 0);
797 	CU_ASSERT(rdma_req.req == NULL);
798 
799 	/* case 2: nvme_rdma_req_get */
800 	TAILQ_INIT(&rqpair.outstanding_reqs);
801 	rdma_req_get = nvme_rdma_req_get(&rqpair);
802 	CU_ASSERT(rdma_req_get == &rdma_req);
803 	CU_ASSERT(rdma_req_get->id == 10086);
804 	CU_ASSERT(rqpair.free_reqs.tqh_first == NULL);
805 	CU_ASSERT(rqpair.outstanding_reqs.tqh_first == rdma_req_get);
806 }
807 
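/* nvme_rdma_req_init() picks between in-capsule (inline) and keyed SGLs; the cases
 * below toggle ctrlr->icdoff and the payload type (contig vs SGL callbacks) to hit
 * each combination, plus the zero-length payload path. */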
808 static void
809 test_nvme_rdma_req_init(void)
810 {
811 	struct nvme_rdma_qpair rqpair = {};
812 	struct spdk_nvme_ctrlr ctrlr = {};
813 	struct spdk_nvmf_cmd cmd = {};
814 	struct spdk_nvme_rdma_req rdma_req = {};
815 	struct nvme_request req = {};
816 	struct nvme_rdma_ut_bdev_io bio = { .iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS };
817 	int rc = 1;
818 
819 	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
820 	ctrlr.cdata.nvmf_specific.msdbd = 16;
821 
822 	rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
823 	rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
824 	rqpair.qpair.ctrlr = &ctrlr;
825 	rqpair.cmds = &cmd;
826 	cmd.sgl[0].address = 0x1111;
827 	rdma_req.id = 0;
828 	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
829 
830 	req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
831 	/* case 1: req->payload_size == 0, expect: pass. */
832 	req.payload_size = 0;
833 	rqpair.qpair.ctrlr->ioccsz_bytes = 1024;
834 	rqpair.qpair.ctrlr->icdoff = 0;
835 	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
836 	CU_ASSERT(rc == 0);
837 	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
838 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
839 	CU_ASSERT(rdma_req.send_wr.num_sge == 1);
840 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
841 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
842 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == 0);
843 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == 0);
844 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
845 
846 	/* case 2: payload_type == NVME_PAYLOAD_TYPE_CONTIG, expect: pass. */
847 	/* icd_supported is true */
848 	rdma_req.req = NULL;
849 	rqpair.qpair.ctrlr->icdoff = 0;
850 	req.payload_offset = 0;
851 	req.payload_size = 1024;
852 	req.payload.reset_sgl_fn = NULL;
853 	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
854 	CU_ASSERT(rc == 0);
855 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
856 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
857 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
858 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
859 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
860 	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
861 	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
862 	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
863 
864 	/* icd_supported is false */
865 	rdma_req.req = NULL;
866 	rqpair.qpair.ctrlr->icdoff = 1;
867 	req.payload_offset = 0;
868 	req.payload_size = 1024;
869 	req.payload.reset_sgl_fn = NULL;
870 	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
871 	CU_ASSERT(rc == 0);
872 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
873 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
874 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
875 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
876 	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)req.payload.contig_or_cb_arg);
877 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
878 
879 	/* case 3: payload_type == NVME_PAYLOAD_TYPE_SGL, expect: pass. */
880 	/* icd_supported is true */
881 	rdma_req.req = NULL;
882 	rqpair.qpair.ctrlr->icdoff = 0;
883 	req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
884 	req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
885 	req.payload.contig_or_cb_arg = &bio;
886 	req.qpair = &rqpair.qpair;
887 	bio.iovpos = 0;
888 	req.payload_offset = 0;
889 	req.payload_size = 1024;
890 	bio.iovs[0].iov_base = (void *)0xdeadbeef;
891 	bio.iovs[0].iov_len = 1024;
892 	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
893 	CU_ASSERT(rc == 0);
894 	CU_ASSERT(bio.iovpos == 1);
895 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
896 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
897 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
898 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
899 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
900 	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
901 	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
902 	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
903 
904 	/* icd_supported is false */
905 	rdma_req.req = NULL;
906 	rqpair.qpair.ctrlr->icdoff = 1;
907 	req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
908 	req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
909 	req.payload.contig_or_cb_arg = &bio;
910 	req.qpair = &rqpair.qpair;
911 	bio.iovpos = 0;
912 	req.payload_offset = 0;
913 	req.payload_size = 1024;
914 	bio.iovs[0].iov_base = (void *)0xdeadbeef;
915 	bio.iovs[0].iov_len = 1024;
916 	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
917 	CU_ASSERT(rc == 0);
918 	CU_ASSERT(bio.iovpos == 1);
919 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
920 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
921 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
922 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
923 	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)bio.iovs[0].iov_base);
924 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
925 }
926 
927 static void
928 test_nvme_rdma_validate_cm_event(void)
929 {
930 	enum rdma_cm_event_type expected_evt_type;
931 	struct rdma_cm_event reaped_evt = {};
932 	int rc;
933 
934 	/* case 1: expected_evt_type == reaped_evt->event, expect: pass */
935 	expected_evt_type = RDMA_CM_EVENT_ADDR_RESOLVED;
936 	reaped_evt.event = RDMA_CM_EVENT_ADDR_RESOLVED;
937 
938 	rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
939 	CU_ASSERT(rc == 0);
940 
941 	/* case 2: expected_evt_type != RDMA_CM_EVENT_ESTABLISHED and is not equal to reaped_evt->event, expect: fail */
942 	reaped_evt.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
943 
944 	rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
945 	CU_ASSERT(rc == -EBADMSG);
946 
947 	/* case 3: expected_evt_type == RDMA_CM_EVENT_ESTABLISHED */
948 	expected_evt_type = RDMA_CM_EVENT_ESTABLISHED;
949 	/* reaped_evt->event == RDMA_CM_EVENT_REJECTED and reaped_evt->status == 10, expect: fail */
950 	reaped_evt.event = RDMA_CM_EVENT_REJECTED;
951 	reaped_evt.status = 10;
952 
953 	rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
954 	CU_ASSERT(rc == -ESTALE);
955 
956 	/* reaped_evt->event == RDMA_CM_EVENT_CONNECT_RESPONSE, expect: pass */
957 	reaped_evt.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
958 
959 	rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
960 	CU_ASSERT(rc == 0);
961 }
962 
963 static void
964 test_nvme_rdma_register_and_unregister_reqs(void)
965 {
966 	struct nvme_rdma_qpair rqpair = {};
967 	struct spdk_nvmf_cmd cmds = {};
968 	struct rdma_cm_id cm_id = {};
969 	struct spdk_nvme_rdma_req rdma_reqs[50] = {};
970 	int rc;
971 
972 	rqpair.cm_id = &cm_id;
973 	rqpair.cmds = &cmds;
974 	g_nvme_hooks.get_rkey = NULL;
975 	rqpair.rdma_reqs = rdma_reqs;
976 	/* case 1: nvme_rdma_register_reqs: nvme_rdma_reg_mr fails, expect: FAIL */
977 	rqpair.num_entries = 0;
978 
979 	rc = nvme_rdma_register_reqs(&rqpair);
980 	CU_ASSERT(rc == -ENOMEM);
981 	CU_ASSERT(rqpair.cmd_mr.mr == NULL);
982 
983 	/* case 2: nvme_rdma_register_reqs: single entry, expect: PASS */
984 	rqpair.num_entries = 1;
985 
986 	rc = nvme_rdma_register_reqs(&rqpair);
987 	CU_ASSERT(rc == 0);
988 	CU_ASSERT(rqpair.cmd_mr.mr == &g_rdma_mr);
989 	CU_ASSERT(rqpair.rdma_reqs[0].send_sgl[0].lkey == rqpair.cmd_mr.mr->lkey);
990 
991 	/* case 3: nvme_rdma_register_reqs: multiple entries, expect: PASS */
992 	rqpair.num_entries = 50;
993 
994 	rc = nvme_rdma_register_reqs(&rqpair);
995 	CU_ASSERT(rc == 0);
996 	CU_ASSERT(rqpair.cmd_mr.mr == &g_rdma_mr);
997 	for (int i = 0; i < rqpair.num_entries; i++) {
998 		CU_ASSERT(rqpair.rdma_reqs[i].send_sgl[0].lkey == rqpair.cmd_mr.mr->lkey);
999 	}
1000 
1001 	/* case 4: nvme_rdma_unregister_reqs, expect: PASS */
1002 	nvme_rdma_unregister_reqs(&rqpair);
1003 	CU_ASSERT(rqpair.cmd_mr.mr == NULL);
1004 }
1005 
1006 static void
1007 test_nvme_rdma_parse_addr(void)
1008 {
1009 	struct sockaddr_storage dst_addr;
1010 	int rc = 0;
1011 
1012 	memset(&dst_addr, 0, sizeof(dst_addr));
1013 	/* case1: getaddrinfo failed */
1014 	rc = nvme_rdma_parse_addr(&dst_addr, AF_INET, NULL, NULL);
1015 	CU_ASSERT(rc != 0);
1016 
1017 	/* case2: res->ai_addrlen < sizeof(*sa). Expect: Pass. */
1018 	rc = nvme_rdma_parse_addr(&dst_addr, AF_INET, "12.34.56.78", "23");
1019 	CU_ASSERT(rc == 0);
1020 	CU_ASSERT(dst_addr.ss_family == AF_INET);
1021 }
1022 
1023 static void
1024 test_nvme_rdma_qpair_init(void)
1025 {
1026 	struct nvme_rdma_qpair		rqpair = {};
1027 	struct rdma_cm_id		 cm_id = {};
1028 	struct ibv_pd				*pd = (struct ibv_pd *)0xfeedbeef;
1029 	struct ibv_qp				qp = { .pd = pd };
1030 	struct nvme_rdma_ctrlr	rctrlr = {};
1031 	int rc = 0;
1032 
1033 	rctrlr.ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
1034 	rqpair.cm_id = &cm_id;
1035 	g_nvme_hooks.get_ibv_pd = NULL;
1036 	rqpair.qpair.poll_group = NULL;
1037 	rqpair.qpair.ctrlr = &rctrlr.ctrlr;
1038 	g_spdk_rdma_qp.qp = &qp;
1039 
1040 	rc = nvme_rdma_qpair_init(&rqpair);
1041 	CU_ASSERT(rc == 0);
1042 
1043 	CU_ASSERT(rqpair.cm_id->context == &rqpair.qpair);
1044 	CU_ASSERT(rqpair.max_send_sge == NVME_RDMA_DEFAULT_TX_SGE);
1045 	CU_ASSERT(rqpair.max_recv_sge == NVME_RDMA_DEFAULT_RX_SGE);
1046 	CU_ASSERT(rqpair.current_num_sends == 0);
1047 	CU_ASSERT(rqpair.current_num_recvs == 0);
1048 	CU_ASSERT(rqpair.cq == (struct ibv_cq *)0xFEEDBEEF);
1049 	CU_ASSERT(rqpair.memory_domain != NULL);
1050 }
1051 
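/* A submit with a free request available must post the send WR (current_num_sends
 * bumped, send_wr.next cleared); with no free request left it must return -EAGAIN
 * and account the request in the poller's queued_requests stat. */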
1052 static void
1053 test_nvme_rdma_qpair_submit_request(void)
1054 {
1055 	int				rc;
1056 	struct nvme_rdma_qpair		rqpair = {};
1057 	struct spdk_nvme_ctrlr		ctrlr = {};
1058 	struct nvme_request		req = {};
1059 	struct nvme_rdma_poller		poller = {};
1060 	struct spdk_nvme_rdma_req	*rdma_req = NULL;
1061 
1062 	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
1063 	req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
1064 	req.payload_size = 0;
1065 	rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
1066 	rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
1067 	rqpair.qpair.ctrlr = &ctrlr;
1068 	rqpair.num_entries = 1;
1069 	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
1070 	rqpair.poller = &poller;
1071 
1072 	rc = nvme_rdma_alloc_reqs(&rqpair);
1073 	CU_ASSERT(rc == 0);
1074 	/* Give send_wr.next a non null value */
1075 	rdma_req = TAILQ_FIRST(&rqpair.free_reqs);
1076 	SPDK_CU_ASSERT_FATAL(rdma_req != NULL);
1077 	rdma_req->send_wr.next = (void *)0xdeadbeef;
1078 
1079 	rc = nvme_rdma_qpair_submit_request(&rqpair.qpair, &req);
1080 	CU_ASSERT(rc == 0);
1081 	CU_ASSERT(rqpair.current_num_sends == 1);
1082 	CU_ASSERT(rdma_req->send_wr.next == NULL);
1083 	TAILQ_REMOVE(&rqpair.outstanding_reqs, rdma_req, link);
1084 	CU_ASSERT(TAILQ_EMPTY(&rqpair.outstanding_reqs));
1085 
1086 	/* No request available */
1087 	rc = nvme_rdma_qpair_submit_request(&rqpair.qpair, &req);
1088 	CU_ASSERT(rc == -EAGAIN);
1089 	CU_ASSERT(rqpair.poller->stats.queued_requests == 1);
1090 
1091 	nvme_rdma_free_reqs(&rqpair);
1092 }
1093 
1094 static void
1095 test_nvme_rdma_memory_domain(void)
1096 {
1097 	struct nvme_rdma_memory_domain *domain_1 = NULL, *domain_2 = NULL, *domain_tmp;
1098 	struct ibv_pd *pd_1 = (struct ibv_pd *)0x1, *pd_2 = (struct ibv_pd *)0x2;
1099 	/* Counters below are used to check the number of created/destroyed rdma_dma_device objects.
1100 	 * Since other unit tests may create dma_devices, we can't just check that the queue is empty or not */
1101 	uint32_t dma_dev_count_start = 0, dma_dev_count = 0, dma_dev_count_end = 0;
1102 
1103 	TAILQ_FOREACH(domain_tmp, &g_memory_domains, link) {
1104 		dma_dev_count_start++;
1105 	}
1106 
1107 	/* spdk_memory_domain_create failed, expect fail */
1108 	MOCK_SET(spdk_memory_domain_create, -1);
1109 	domain_1 = nvme_rdma_get_memory_domain(pd_1);
1110 	CU_ASSERT(domain_1 == NULL);
1111 	MOCK_CLEAR(spdk_memory_domain_create);
1112 
1113 	/* Normal scenario */
1114 	domain_1 = nvme_rdma_get_memory_domain(pd_1);
1115 	SPDK_CU_ASSERT_FATAL(domain_1 != NULL);
1116 	CU_ASSERT(domain_1->domain != NULL);
1117 	CU_ASSERT(domain_1->pd == pd_1);
1118 	CU_ASSERT(domain_1->ref == 1);
1119 
1120 	/* Request the same pd, ref counter increased */
1121 	CU_ASSERT(nvme_rdma_get_memory_domain(pd_1) == domain_1);
1122 	CU_ASSERT(domain_1->ref == 2);
1123 
1124 	/* Request another pd */
1125 	domain_2 = nvme_rdma_get_memory_domain(pd_2);
1126 	SPDK_CU_ASSERT_FATAL(domain_2 != NULL);
1127 	CU_ASSERT(domain_2->domain != NULL);
1128 	CU_ASSERT(domain_2->pd == pd_2);
1129 	CU_ASSERT(domain_2->ref == 1);
1130 
1131 	TAILQ_FOREACH(domain_tmp, &g_memory_domains, link) {
1132 		dma_dev_count++;
1133 	}
1134 	CU_ASSERT(dma_dev_count == dma_dev_count_start + 2);
1135 
1136 	/* put domain_1, decrement refcount */
1137 	nvme_rdma_put_memory_domain(domain_1);
1138 
1139 	/* Release both devices */
1140 	CU_ASSERT(domain_2->ref == 1);
1141 	nvme_rdma_put_memory_domain(domain_1);
1142 	nvme_rdma_put_memory_domain(domain_2);
1143 
1144 	TAILQ_FOREACH(domain_tmp, &g_memory_domains, link) {
1145 		dma_dev_count_end++;
1146 	}
1147 	CU_ASSERT(dma_dev_count_start == dma_dev_count_end);
1148 }
1149 
1150 static void
1151 test_rdma_ctrlr_get_memory_domains(void)
1152 {
1153 	struct nvme_rdma_ctrlr rctrlr = {};
1154 	struct nvme_rdma_qpair rqpair = {};
1155 	struct spdk_memory_domain *domain = (struct spdk_memory_domain *)0xbaadbeef;
1156 	struct nvme_rdma_memory_domain rdma_domain = { .domain = domain };
1157 	struct spdk_memory_domain *domains[1] = {NULL};
1158 
1159 	rqpair.memory_domain = &rdma_domain;
1160 	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
1161 	rctrlr.ctrlr.adminq = &rqpair.qpair;
1162 
1163 	/* Test 1, input domains pointer is NULL */
1164 	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, NULL, 1) == 1);
1165 
1166 	/* Test 2, input array_size is 0 */
1167 	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, domains, 0) == 1);
1168 	CU_ASSERT(domains[0] == NULL);
1169 
1170 	/* Test 3, both input domains pointer and array_size are NULL/0 */
1171 	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, NULL, 0) == 1);
1172 
1173 	/* Test 4, input parameters are valid */
1174 	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, domains, 1) == 1);
1175 	CU_ASSERT(domains[0] == domain);
1176 }
1177 
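/* Translation goes through the request's memory domain when extended IO opts are
 * set, otherwise through the regular RDMA MR map; both paths and their error
 * returns are covered. */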
1178 static void
1179 test_rdma_get_memory_translation(void)
1180 {
1181 	struct ibv_qp qp = {.pd = (struct ibv_pd *) 0xfeedbeef};
1182 	struct spdk_rdma_qp rdma_qp = {.qp = &qp};
1183 	struct nvme_rdma_qpair rqpair = {.rdma_qp = &rdma_qp};
1184 	struct spdk_nvme_ns_cmd_ext_io_opts io_opts = {
1185 		.memory_domain = (struct spdk_memory_domain *) 0xdeaddead
1186 	};
1187 	struct nvme_request req = {.payload = {.opts = &io_opts}};
1188 	struct nvme_rdma_memory_translation_ctx ctx = {
1189 		.addr = (void *) 0xBAADF00D,
1190 		.length = 0x100
1191 	};
1192 	int rc;
1193 
1194 	rqpair.memory_domain = nvme_rdma_get_memory_domain(rqpair.rdma_qp->qp->pd);
1195 	SPDK_CU_ASSERT_FATAL(rqpair.memory_domain != NULL);
1196 
1197 	/* case 1, using extended IO opts with DMA device.
1198 	 * Test 1 - spdk_memory_domain_translate_data error, expect fail */
1199 	MOCK_SET(spdk_memory_domain_translate_data, -1);
1200 	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
1201 	CU_ASSERT(rc != 0);
1202 	MOCK_CLEAR(spdk_memory_domain_translate_data);
1203 
1204 	/* Test 2 - expect pass */
1205 	g_memory_translation_translation.iov_count = 1;
1206 	g_memory_translation_translation.iov.iov_base = ctx.addr + 1;
1207 	g_memory_translation_translation.iov.iov_len = ctx.length;
1208 	g_memory_translation_translation.rdma.lkey = 123;
1209 	g_memory_translation_translation.rdma.rkey = 321;
1210 
1211 	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
1212 	CU_ASSERT(rc == 0);
1213 	CU_ASSERT(ctx.lkey == g_memory_translation_translation.rdma.lkey);
1214 	CU_ASSERT(ctx.rkey == g_memory_translation_translation.rdma.rkey);
1215 	CU_ASSERT(ctx.addr == g_memory_translation_translation.iov.iov_base);
1216 	CU_ASSERT(ctx.length == g_memory_translation_translation.iov.iov_len);
1217 
1218 	/* case 2, using rdma translation
1219 	 * Test 1 - spdk_rdma_get_translation error, expect fail */
1220 	req.payload.opts = NULL;
1221 	MOCK_SET(spdk_rdma_get_translation, -1);
1222 	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
1223 	CU_ASSERT(rc != 0);
1224 	MOCK_CLEAR(spdk_rdma_get_translation);
1225 
1226 	/* Test 2 - expect pass */
1227 	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
1228 	CU_ASSERT(rc == 0);
1229 	CU_ASSERT(ctx.lkey == RDMA_UT_LKEY);
1230 	CU_ASSERT(ctx.rkey == RDMA_UT_RKEY);
1231 
1232 	/* Cleanup */
1233 	nvme_rdma_put_memory_domain(rqpair.memory_domain);
1234 }
1235 
1236 static void
1237 test_nvme_rdma_poll_group_get_qpair_by_id(void)
1238 {
1239 	const uint32_t test_qp_num = 123;
1240 	struct nvme_rdma_poll_group	group = {};
1241 	struct nvme_rdma_qpair rqpair = {};
1242 	struct spdk_rdma_qp rdma_qp = {};
1243 	struct ibv_qp qp = { .qp_num = test_qp_num };
1244 
1245 	STAILQ_INIT(&group.group.disconnected_qpairs);
1246 	STAILQ_INIT(&group.group.connected_qpairs);
1247 	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
1248 
1249 	/* Test 1 - Simulate case when nvme_rdma_qpair is disconnected but still in one of lists.
1250 	 * nvme_rdma_poll_group_get_qpair_by_id must return NULL */
1251 	STAILQ_INSERT_HEAD(&group.group.disconnected_qpairs, &rqpair.qpair, poll_group_stailq);
1252 	CU_ASSERT(nvme_rdma_poll_group_get_qpair_by_id(&group, test_qp_num) == NULL);
1253 	STAILQ_REMOVE_HEAD(&group.group.disconnected_qpairs, poll_group_stailq);
1254 
1255 	STAILQ_INSERT_HEAD(&group.group.connected_qpairs, &rqpair.qpair, poll_group_stailq);
1256 	CU_ASSERT(nvme_rdma_poll_group_get_qpair_by_id(&group, test_qp_num) == NULL);
1257 	STAILQ_REMOVE_HEAD(&group.group.connected_qpairs, poll_group_stailq);
1258 
1259 	/* Test 2 - nvme_rdma_qpair with valid rdma_qp/ibv_qp and qp_num */
1260 	rdma_qp.qp = &qp;
1261 	rqpair.rdma_qp = &rdma_qp;
1262 
1263 	STAILQ_INSERT_HEAD(&group.group.disconnected_qpairs, &rqpair.qpair, poll_group_stailq);
1264 	CU_ASSERT(nvme_rdma_poll_group_get_qpair_by_id(&group, test_qp_num) == &rqpair);
1265 	STAILQ_REMOVE_HEAD(&group.group.disconnected_qpairs, poll_group_stailq);
1266 
1267 	STAILQ_INSERT_HEAD(&group.group.connected_qpairs, &rqpair.qpair, poll_group_stailq);
1268 	CU_ASSERT(nvme_rdma_poll_group_get_qpair_by_id(&group, test_qp_num) == &rqpair);
1269 	STAILQ_REMOVE_HEAD(&group.group.connected_qpairs, poll_group_stailq);
1270 }
1271 
1272 static void
1273 test_nvme_rdma_ctrlr_get_max_sges(void)
1274 {
1275 	struct nvme_rdma_ctrlr	rctrlr = {};
1276 
1277 	rctrlr.ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
1278 	rctrlr.max_sge = NVME_RDMA_MAX_SGL_DESCRIPTORS;
1279 	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 16;
1280 	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4096;
1281 	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 16);
1282 
1283 	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 32;
1284 	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4096;
1285 	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 16);
1286 
1287 	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 8;
1288 	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4096;
1289 	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 8);
1290 
1291 	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 16;
1292 	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4;
1293 	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 1);
1294 
1295 	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 16;
1296 	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 6;
1297 	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 2);
1298 }
1299 
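/* Note: the poller created last (contexts2, "/dev/test2") shows up first in
 * group->pollers, which the device/name assertions below rely on. */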
1300 static void
1301 test_nvme_rdma_poll_group_get_stats(void)
1302 {
1303 	int rc = -1;
1304 	struct spdk_nvme_transport_poll_group_stat *tpointer = NULL;
1305 	struct nvme_rdma_poll_group tgroup = {};
1306 	struct ibv_device dev1 = {}, dev2 = {};
1307 	struct ibv_context contexts1 = {}, contexts2 = {};
1308 	struct nvme_rdma_poller *tpoller1 = NULL;
1309 	struct nvme_rdma_poller *tpoller2 = NULL;
1310 
1311 	memcpy(dev1.name, "/dev/test1", sizeof("/dev/test1"));
1312 	memcpy(dev2.name, "/dev/test2", sizeof("/dev/test2"));
1313 	contexts1.device = &dev1;
1314 	contexts2.device = &dev2;
1315 
1316 	/* Initialization */
1317 	STAILQ_INIT(&tgroup.pollers);
1318 	rc = nvme_rdma_poller_create(&tgroup, &contexts1);
1319 	CU_ASSERT(rc == 0);
1320 	CU_ASSERT(tgroup.num_pollers == 1);
1321 
1322 	rc = nvme_rdma_poller_create(&tgroup, &contexts2);
1323 	CU_ASSERT(rc == 0);
1324 	CU_ASSERT(tgroup.num_pollers == 2);
1325 	CU_ASSERT(&tgroup.pollers != NULL);
1326 
1327 	tpoller1 = STAILQ_FIRST(&tgroup.pollers);
1328 	SPDK_CU_ASSERT_FATAL(tpoller1 != NULL);
1329 	tpoller2 = STAILQ_NEXT(tpoller1, link);
1330 	SPDK_CU_ASSERT_FATAL(tpoller2 != NULL);
1331 
1332 	CU_ASSERT(tpoller1->device == &contexts2);
1333 	CU_ASSERT(tpoller2->device == &contexts1);
1334 	CU_ASSERT(strcmp(tpoller1->device->device->name, "/dev/test2") == 0);
1335 	CU_ASSERT(strcmp(tpoller2->device->device->name, "/dev/test1") == 0);
1336 	CU_ASSERT(tpoller1->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
1337 	CU_ASSERT(tpoller2->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
1338 	CU_ASSERT(tpoller1->required_num_wc == 0);
1339 	CU_ASSERT(tpoller2->required_num_wc == 0);
1340 
1341 	/* Test1: Invalid group pointer */
1342 	rc = nvme_rdma_poll_group_get_stats(NULL, &tpointer);
1343 	CU_ASSERT(rc == -EINVAL);
1344 
1345 	/* Test2: Invalid stats pointer */
1346 	rc = nvme_rdma_poll_group_get_stats(&tgroup.group, NULL);
1347 	CU_ASSERT(rc == -EINVAL);
1348 
1349 	/* Test3: Success, returned member variables should be correct */
1350 	tpoller1->stats.polls = 111;
1351 	tpoller1->stats.idle_polls = 112;
1352 	tpoller1->stats.completions = 113;
1353 	tpoller1->stats.queued_requests = 114;
1354 	tpoller1->stats.rdma_stats.send.num_submitted_wrs = 121;
1355 	tpoller1->stats.rdma_stats.send.doorbell_updates = 122;
1356 	tpoller1->stats.rdma_stats.recv.num_submitted_wrs = 131;
1357 	tpoller1->stats.rdma_stats.recv.doorbell_updates = 132;
1358 	tpoller2->stats.polls = 211;
1359 	tpoller2->stats.idle_polls = 212;
1360 	tpoller2->stats.completions = 213;
1361 	tpoller2->stats.queued_requests = 214;
1362 	tpoller2->stats.rdma_stats.send.num_submitted_wrs = 221;
1363 	tpoller2->stats.rdma_stats.send.doorbell_updates = 222;
1364 	tpoller2->stats.rdma_stats.recv.num_submitted_wrs = 231;
1365 	tpoller2->stats.rdma_stats.recv.doorbell_updates = 232;
1366 
1367 	rc = nvme_rdma_poll_group_get_stats(&tgroup.group, &tpointer);
1368 	CU_ASSERT(rc == 0);
1369 	CU_ASSERT(tpointer != NULL);
1370 	CU_ASSERT(tpointer->trtype == SPDK_NVME_TRANSPORT_RDMA);
1371 	CU_ASSERT(tpointer->rdma.num_devices == tgroup.num_pollers);
1372 	CU_ASSERT(tpointer->rdma.device_stats != NULL);
1373 
1374 	CU_ASSERT(strcmp(tpointer->rdma.device_stats[0].name, "/dev/test2") == 0);
1375 	CU_ASSERT(tpointer->rdma.device_stats[0].polls == 111);
1376 	CU_ASSERT(tpointer->rdma.device_stats[0].idle_polls == 112);
1377 	CU_ASSERT(tpointer->rdma.device_stats[0].completions == 113);
1378 	CU_ASSERT(tpointer->rdma.device_stats[0].queued_requests == 114);
1379 	CU_ASSERT(tpointer->rdma.device_stats[0].total_send_wrs == 121);
1380 	CU_ASSERT(tpointer->rdma.device_stats[0].send_doorbell_updates == 122);
1381 	CU_ASSERT(tpointer->rdma.device_stats[0].total_recv_wrs == 131);
1382 	CU_ASSERT(tpointer->rdma.device_stats[0].recv_doorbell_updates == 132);
1383 
1384 	CU_ASSERT(strcmp(tpointer->rdma.device_stats[1].name, "/dev/test1") == 0);
1385 	CU_ASSERT(tpointer->rdma.device_stats[1].polls == 211);
1386 	CU_ASSERT(tpointer->rdma.device_stats[1].idle_polls == 212);
1387 	CU_ASSERT(tpointer->rdma.device_stats[1].completions == 213);
1388 	CU_ASSERT(tpointer->rdma.device_stats[1].queued_requests == 214);
1389 	CU_ASSERT(tpointer->rdma.device_stats[1].total_send_wrs == 221);
1390 	CU_ASSERT(tpointer->rdma.device_stats[1].send_doorbell_updates == 222);
1391 	CU_ASSERT(tpointer->rdma.device_stats[1].total_recv_wrs == 231);
1392 	CU_ASSERT(tpointer->rdma.device_stats[1].recv_doorbell_updates == 232);
1393 
1394 	nvme_rdma_poll_group_free_stats(&tgroup.group, tpointer);
1395 	nvme_rdma_poll_group_free_pollers(&tgroup);
1396 }
1397 
1398 static void
1399 test_nvme_rdma_poll_group_set_cq(void)
1400 {
1401 	int rc = -1;
1402 	struct nvme_rdma_poll_group *group;
1403 	struct spdk_nvme_transport_poll_group *tgroup;
1404 	struct nvme_rdma_poller *poller1;
1405 	struct nvme_rdma_poller *poller2;
1406 	struct nvme_rdma_qpair rqpair = {};
1407 	struct rdma_cm_id cm_id = {};
1408 
1409 	/* Case1: Test function nvme_rdma_poll_group_create
1410 	   Test1: Function rdma_get_devices failed */
1411 	MOCK_SET(rdma_get_devices, NULL);
1412 
1413 	tgroup = nvme_rdma_poll_group_create();
1414 	CU_ASSERT(tgroup == NULL);
1415 
1416 	/* Test2: Function nvme_rdma_poller_create failed */
1417 	MOCK_CLEAR(rdma_get_devices);
1418 	MOCK_SET(ibv_create_cq, NULL);
1419 
1420 	tgroup = nvme_rdma_poll_group_create();
1421 	CU_ASSERT(tgroup == NULL);
1422 
1423 	/* Test3: Function nvme_rdma_poll_group_create success */
1424 	MOCK_SET(ibv_create_cq, (struct ibv_cq *)0xFEEDBEEF);
1425 
1426 	tgroup = nvme_rdma_poll_group_create();
1427 	CU_ASSERT(tgroup != NULL);
1428 
1429 	group = nvme_rdma_poll_group(tgroup);
1430 	CU_ASSERT(group != NULL);
1431 
1432 	poller1 = STAILQ_FIRST(&group->pollers);
1433 	SPDK_CU_ASSERT_FATAL(poller1 != NULL);
1434 	poller2 = STAILQ_NEXT(poller1, link);
1435 	SPDK_CU_ASSERT_FATAL(poller2 != NULL);
1436 
1437 	CU_ASSERT(poller1->device == (struct ibv_context *)0xFEEDBEEF);
1438 	CU_ASSERT(poller1->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
1439 	CU_ASSERT(poller1->required_num_wc == 0);
1440 	CU_ASSERT(poller2->device == (struct ibv_context *)0xDEADBEEF);
1441 	CU_ASSERT(poller2->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
1442 	CU_ASSERT(poller2->required_num_wc == 0);
1443 
1444 	/* Case2: Test function nvme_rdma_poll_group_set_cq */
1445 	rqpair.qpair.poll_group = tgroup;
1446 	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
1447 	rqpair.cm_id = &cm_id;
1448 
1449 	/* Test1: Unable to find a cq for qpair on poll group */
1450 	cm_id.verbs = NULL;
1451 
1452 	rc = nvme_rdma_poll_group_set_cq(&rqpair.qpair);
1453 	CU_ASSERT(rc == -EINVAL);
1454 	CU_ASSERT(rqpair.cq == NULL);
1455 
1456 	/* Test2: Match cq success, current_num_wc is enough */
1457 	cm_id.verbs = (void *)0xFEEDBEEF;
1458 	rqpair.num_entries = 0;
1459 
1460 	rc = nvme_rdma_poll_group_set_cq(&rqpair.qpair);
1461 	CU_ASSERT(rc == 0);
1462 	CU_ASSERT(poller1->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
1463 	CU_ASSERT(poller1->required_num_wc == 0);
1464 
1465 	/* Test3: Match cq success, function ibv_resize_cq failed */
1466 	rqpair.cq = NULL;
1467 	rqpair.num_entries = DEFAULT_NVME_RDMA_CQ_SIZE - 1;
1468 	MOCK_SET(ibv_resize_cq, -1);
1469 
1470 	rc = nvme_rdma_poll_group_set_cq(&rqpair.qpair);
1471 	CU_ASSERT(rc == -EPROTO);
1472 	CU_ASSERT(poller1->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
1473 	CU_ASSERT(poller1->required_num_wc == 0);
1474 
1475 	/* Test4: Current_num_wc is not enough, resize success */
1476 	MOCK_SET(ibv_resize_cq, 0);
1477 
1478 	rc = nvme_rdma_poll_group_set_cq(&rqpair.qpair);
1479 	CU_ASSERT(rc == 0);
1480 	CU_ASSERT(poller1->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE * 2);
1481 	CU_ASSERT(poller1->required_num_wc == (DEFAULT_NVME_RDMA_CQ_SIZE - 1) * 2);
1482 	CU_ASSERT(rqpair.cq == poller1->cq);
1483 	CU_ASSERT(rqpair.poller == poller1);
1484 
1485 	rc = nvme_rdma_poll_group_destroy(tgroup);
1486 	CU_ASSERT(rc == 0);
1487 }
1488 
1489 int
1490 main(int argc, char **argv)
1491 {
1492 	CU_pSuite	suite = NULL;
1493 	unsigned int	num_failures;
1494 
1495 	CU_set_error_action(CUEA_ABORT);
1496 	CU_initialize_registry();
1497 
1498 	suite = CU_add_suite("nvme_rdma", NULL, NULL);
1499 	CU_ADD_TEST(suite, test_nvme_rdma_build_sgl_request);
1500 	CU_ADD_TEST(suite, test_nvme_rdma_build_sgl_inline_request);
1501 	CU_ADD_TEST(suite, test_nvme_rdma_build_contig_request);
1502 	CU_ADD_TEST(suite, test_nvme_rdma_build_contig_inline_request);
1503 	CU_ADD_TEST(suite, test_nvme_rdma_alloc_reqs);
1504 	CU_ADD_TEST(suite, test_nvme_rdma_alloc_rsps);
1505 	CU_ADD_TEST(suite, test_nvme_rdma_ctrlr_create_qpair);
1506 	CU_ADD_TEST(suite, test_nvme_rdma_poller_create);
1507 	CU_ADD_TEST(suite, test_nvme_rdma_qpair_process_cm_event);
1508 	CU_ADD_TEST(suite, test_nvme_rdma_mr_get_lkey);
1509 	CU_ADD_TEST(suite, test_nvme_rdma_ctrlr_construct);
1510 	CU_ADD_TEST(suite, test_nvme_rdma_req_put_and_get);
1511 	CU_ADD_TEST(suite, test_nvme_rdma_req_init);
1512 	CU_ADD_TEST(suite, test_nvme_rdma_validate_cm_event);
1513 	CU_ADD_TEST(suite, test_nvme_rdma_register_and_unregister_reqs);
1514 	CU_ADD_TEST(suite, test_nvme_rdma_parse_addr);
1515 	CU_ADD_TEST(suite, test_nvme_rdma_qpair_init);
1516 	CU_ADD_TEST(suite, test_nvme_rdma_qpair_submit_request);
1517 	CU_ADD_TEST(suite, test_nvme_rdma_memory_domain);
1518 	CU_ADD_TEST(suite, test_rdma_ctrlr_get_memory_domains);
1519 	CU_ADD_TEST(suite, test_rdma_get_memory_translation);
1520 	CU_ADD_TEST(suite, test_nvme_rdma_poll_group_get_qpair_by_id);
1521 	CU_ADD_TEST(suite, test_nvme_rdma_ctrlr_get_max_sges);
1522 	CU_ADD_TEST(suite, test_nvme_rdma_poll_group_get_stats);
1523 	CU_ADD_TEST(suite, test_nvme_rdma_poll_group_set_cq);
1524 
1525 	CU_basic_set_mode(CU_BRM_VERBOSE);
1526 	CU_basic_run_tests();
1527 	num_failures = CU_get_number_of_failures();
1528 	CU_cleanup_registry();
1529 	return num_failures;
1530 }
1531