/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 * Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "nvme/nvme_rdma.c"
#include "common/lib/nvme/common_stubs.h"
#include "common/lib/test_rdma.c"

SPDK_LOG_REGISTER_COMPONENT(nvme)

DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size, uint64_t translation), 0);
DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size), 0);

DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));

DEFINE_STUB(nvme_poll_group_connect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);

DEFINE_STUB_V(nvme_qpair_resubmit_requests, (struct spdk_nvme_qpair *qpair, uint32_t num_requests));
DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);

DEFINE_STUB(rdma_ack_cm_event, int, (struct rdma_cm_event *event), 0);
DEFINE_STUB_V(rdma_free_devices, (struct ibv_context **list));
DEFINE_STUB(fcntl, int, (int fd, int cmd, ...), 0);
DEFINE_STUB_V(rdma_destroy_event_channel, (struct rdma_event_channel *channel));

DEFINE_STUB(ibv_dereg_mr, int, (struct ibv_mr *mr), 0);
DEFINE_STUB(ibv_resize_cq, int, (struct ibv_cq *cq, int cqe), 0);

DEFINE_STUB(spdk_memory_domain_get_context, struct spdk_memory_domain_ctx *,
	    (struct spdk_memory_domain *device), NULL);
DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
	    (struct spdk_memory_domain *device), SPDK_DMA_DEVICE_TYPE_RDMA);
DEFINE_STUB_V(spdk_memory_domain_destroy, (struct spdk_memory_domain *device));
DEFINE_STUB(spdk_memory_domain_pull_data, int, (struct spdk_memory_domain *src_domain,
		void *src_domain_ctx, struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov,
		uint32_t dst_iov_cnt, spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg), 0);
DEFINE_STUB(spdk_rdma_cm_id_get_numa_id, int32_t, (struct rdma_cm_id *cm_id), 0);

DEFINE_STUB_V(spdk_nvme_qpair_print_command, (struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cmd *cmd));

DEFINE_STUB_V(spdk_nvme_qpair_print_completion, (struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cpl *cpl));

DEFINE_RETURN_MOCK(spdk_memory_domain_create, int);
int
spdk_memory_domain_create(struct spdk_memory_domain **domain, enum spdk_dma_device_type type,
			  struct spdk_memory_domain_ctx *ctx, const char *id)
{
	static struct spdk_memory_domain *__dma_dev = (struct spdk_memory_domain *)0xdeaddead;

	HANDLE_RETURN_MOCK(spdk_memory_domain_create);

	*domain = __dma_dev;

	return 0;
}

static struct spdk_memory_domain_translation_result g_memory_translation_translation = {.size = sizeof(struct spdk_memory_domain_translation_result) };

DEFINE_RETURN_MOCK(spdk_memory_domain_translate_data, int);
int
spdk_memory_domain_translate_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
				  struct spdk_memory_domain *dst_domain,
				  struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
				  void *addr, size_t len,
				  struct spdk_memory_domain_translation_result *result)
{

	HANDLE_RETURN_MOCK(spdk_memory_domain_translate_data);

	memcpy(result, &g_memory_translation_translation, sizeof(g_memory_translation_translation));

	return 0;
}

/* ibv_reg_mr can be a macro, need to undefine it */
#ifdef ibv_reg_mr
#undef ibv_reg_mr
#endif

DEFINE_RETURN_MOCK(ibv_reg_mr, struct ibv_mr *);
struct ibv_mr *
ibv_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
{
	HANDLE_RETURN_MOCK(ibv_reg_mr);
	if (length > 0) {
		return &g_rdma_mr;
	} else {
		return NULL;
	}
}

struct nvme_rdma_ut_bdev_io {
	struct iovec iovs[NVME_RDMA_MAX_SGL_DESCRIPTORS];
	int iovpos;
	int iovcnt;
};

DEFINE_RETURN_MOCK(rdma_get_devices, struct ibv_context **);
struct ibv_context **
rdma_get_devices(int *num_devices)
{
	static struct ibv_context *_contexts[] = {
		(struct ibv_context *)0xDEADBEEF,
		(struct ibv_context *)0xFEEDBEEF,
		NULL
	};

	HANDLE_RETURN_MOCK(rdma_get_devices);
	return _contexts;
}

DEFINE_RETURN_MOCK(rdma_create_event_channel, struct rdma_event_channel *);
struct rdma_event_channel *
rdma_create_event_channel(void)
{
	HANDLE_RETURN_MOCK(rdma_create_event_channel);
	return NULL;
}

DEFINE_RETURN_MOCK(ibv_query_device, int);
int
ibv_query_device(struct ibv_context *context,
		 struct ibv_device_attr *device_attr)
{
	if (device_attr) {
		device_attr->max_sge = NVME_RDMA_MAX_SGL_DESCRIPTORS;
	}
	HANDLE_RETURN_MOCK(ibv_query_device);

	return 0;
}

/* essentially a simplification of bdev_nvme_next_sge and bdev_nvme_reset_sgl */
static void
nvme_rdma_ut_reset_sgl(void *cb_arg, uint32_t offset)
{
	struct nvme_rdma_ut_bdev_io *bio = cb_arg;
	struct iovec *iov;

	for (bio->iovpos = 0; bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
		iov = &bio->iovs[bio->iovpos];
		/* Only provide offsets at the beginning of an iov */
		if (offset == 0) {
			break;
		}

		offset -= iov->iov_len;
	}

	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
}

static int
nvme_rdma_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
{
	struct nvme_rdma_ut_bdev_io *bio = cb_arg;
	struct iovec *iov;

	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);

	if (bio->iovpos == bio->iovcnt) {
		return -1;
	}

	iov = &bio->iovs[bio->iovpos];

	*address = iov->iov_base;
	*length = iov->iov_len;
	bio->iovpos++;

	return 0;
}

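/*
 * nvme_rdma_build_sgl_request():
 * - a single iov becomes a keyed data block descriptor in sgl1
 * - multiple iovs become an in-capsule descriptor list (last segment, offset subtype)
 * - payloads that span the MR size, payloads larger than the available iovs, lengths
 *   that overflow the 24-bit keyed length field, and descriptor lists that do not
 *   fit in ioccsz_bytes are rejected
 */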
static void
test_nvme_rdma_build_sgl_request(void)
{
	struct nvme_rdma_qpair rqpair;
	struct spdk_nvme_ctrlr ctrlr = {0};
	struct spdk_nvmf_cmd cmd = {{0}};
	struct spdk_nvme_rdma_req rdma_req = {0};
	struct nvme_request req = {{0}};
	struct nvme_rdma_ut_bdev_io bio = { .iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS };
	uint64_t i;
	int rc;

	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
	ctrlr.cdata.nvmf_specific.msdbd = 16;
	ctrlr.ioccsz_bytes = 4096;

	rqpair.mr_map = (struct spdk_rdma_utils_mem_map *)0xdeadbeef;
	rqpair.rdma_qp = (struct spdk_rdma_provider_qp *)0xdeadbeef;
	rqpair.qpair.ctrlr = &ctrlr;
	rqpair.cmds = &cmd;
	cmd.sgl[0].address = 0x1111;
	rdma_req.id = 0;
	rdma_req.req = &req;

	req.payload = NVME_PAYLOAD_SGL(nvme_rdma_ut_reset_sgl, nvme_rdma_ut_next_sge, &bio, NULL);
	req.qpair = &rqpair.qpair;

	for (i = 0; i < NVME_RDMA_MAX_SGL_DESCRIPTORS; i++) {
		bio.iovs[i].iov_base = (void *)0xF00000000 + i + 1;
		bio.iovs[i].iov_len = 0;
	}

	/* Test case 1: single SGL. Expected: PASS */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 0x1000;
	bio.iovs[0].iov_len = 0x1000;
	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(bio.iovpos == 1);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)bio.iovs[0].iov_base);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));

	/* Test case 2: multiple SGL. Expected: PASS */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 0x4000;
	for (i = 0; i < 4; i++) {
		bio.iovs[i].iov_len = 0x1000;
	}
	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(bio.iovpos == 4);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 4 * sizeof(struct spdk_nvme_sgl_descriptor));
	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)0);
	CU_ASSERT(rdma_req.send_sgl[0].length == 4 * sizeof(struct spdk_nvme_sgl_descriptor) +
		  sizeof(struct spdk_nvme_cmd));
	for (i = 0; i < 4; i++) {
		CU_ASSERT(cmd.sgl[i].keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
		CU_ASSERT(cmd.sgl[i].keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
		CU_ASSERT(cmd.sgl[i].keyed.length == bio.iovs[i].iov_len);
		CU_ASSERT(cmd.sgl[i].keyed.key == RDMA_UT_RKEY);
		CU_ASSERT(cmd.sgl[i].address == (uint64_t)bio.iovs[i].iov_base);
	}

	/* Test case 3: Multiple SGL, SGL 2X mr size. Expected: FAIL */
	bio.iovpos = 0;
	req.payload_offset = 0;
	g_mr_size = 0x800;
	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc != 0);
	CU_ASSERT(bio.iovpos == 1);

	/* Test case 4: Multiple SGL, SGL size smaller than I/O size. Expected: FAIL */
	bio.iovpos = 0;
	bio.iovcnt = 4;
	req.payload_offset = 0;
	req.payload_size = 0x6000;
	g_mr_size = 0x0;
	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc != 0);
	CU_ASSERT(bio.iovpos == bio.iovcnt);
	bio.iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS;

	/* Test case 5: SGL length exceeds 3 bytes. Expected: FAIL */
	req.payload_size = 0x1000 + (1 << 24);
	bio.iovs[0].iov_len = 0x1000;
	bio.iovs[1].iov_len = 1 << 24;
	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc != 0);

	/* Test case 6: 4 SGL descriptors, size of SGL descriptors exceeds ICD. Expected: FAIL */
	ctrlr.ioccsz_bytes = 60;
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 0x4000;
	for (i = 0; i < 4; i++) {
		bio.iovs[i].iov_len = 0x1000;
	}
	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == -1);
}

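/*
 * nvme_rdma_build_sgl_inline_request(): the payload is carried as in-capsule data,
 * so sgl1 uses the unkeyed data block/offset form and the data itself is attached
 * as a second send SGE. Unlike the keyed path, a length of 1 << 24 is accepted.
 */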
static void
test_nvme_rdma_build_sgl_inline_request(void)
{
	struct nvme_rdma_qpair rqpair;
	struct spdk_nvme_ctrlr ctrlr = {0};
	struct spdk_nvmf_cmd cmd = {{0}};
	struct spdk_nvme_rdma_req rdma_req = {0};
	struct nvme_request req = {{0}};
	struct nvme_rdma_ut_bdev_io bio = { .iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS };
	int rc;

	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
	ctrlr.cdata.nvmf_specific.msdbd = 16;

	rqpair.mr_map = (struct spdk_rdma_utils_mem_map *)0xdeadbeef;
	rqpair.rdma_qp = (struct spdk_rdma_provider_qp *)0xdeadbeef;
	rqpair.qpair.ctrlr = &ctrlr;
	rqpair.cmds = &cmd;
	cmd.sgl[0].address = 0x1111;
	rdma_req.id = 0;
	rdma_req.req = &req;

	req.payload = NVME_PAYLOAD_SGL(nvme_rdma_ut_reset_sgl, nvme_rdma_ut_next_sge, &bio, NULL);
	req.qpair = &rqpair.qpair;

	/* Test case 1: single inline SGL. Expected: PASS */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 0x1000;
	bio.iovs[0].iov_base = (void *)0xdeadbeef;
	bio.iovs[0].iov_len = 0x1000;
	rc = nvme_rdma_build_sgl_inline_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(bio.iovpos == 1);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);

	/* Test case 2: SGL length exceeds 3 bytes. Expected: PASS */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 1 << 24;
	bio.iovs[0].iov_len = 1 << 24;
	rc = nvme_rdma_build_sgl_inline_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(bio.iovpos == 1);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
}

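/*
 * nvme_rdma_build_contig_request(): a contiguous buffer is described by a keyed
 * data block descriptor (address, length, rkey); lengths that do not fit in the
 * 24-bit keyed length field are rejected.
 */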
static void
test_nvme_rdma_build_contig_request(void)
{
	struct nvme_rdma_qpair rqpair;
	struct spdk_nvme_ctrlr ctrlr = {0};
	struct spdk_nvmf_cmd cmd = {{0}};
	struct spdk_nvme_rdma_req rdma_req = {0};
	struct nvme_request req = {{0}};
	int rc;

	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
	ctrlr.cdata.nvmf_specific.msdbd = 16;

	rqpair.mr_map = (struct spdk_rdma_utils_mem_map *)0xdeadbeef;
	rqpair.rdma_qp = (struct spdk_rdma_provider_qp *)0xdeadbeef;
	rqpair.qpair.ctrlr = &ctrlr;
	rqpair.cmds = &cmd;
	cmd.sgl[0].address = 0x1111;
	rdma_req.id = 0;
	rdma_req.req = &req;

	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
	req.qpair = &rqpair.qpair;

	/* Test case 1: contig request. Expected: PASS */
	req.payload_offset = 0;
	req.payload_size = 0x1000;
	rc = nvme_rdma_build_contig_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)req.payload.contig_or_cb_arg);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));

	/* Test case 2: SGL length exceeds 3 bytes. Expected: FAIL */
	req.payload_offset = 0;
	req.payload_size = 1 << 24;
	rc = nvme_rdma_build_contig_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc != 0);
}

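/*
 * nvme_rdma_build_contig_inline_request(): a contiguous buffer sent as in-capsule
 * data; sgl1 uses the unkeyed data block/offset form and the buffer is attached as
 * a second send SGE, even when the length exceeds 1 << 24.
 */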
static void
test_nvme_rdma_build_contig_inline_request(void)
{
	struct nvme_rdma_qpair rqpair;
	struct spdk_nvme_ctrlr ctrlr = {0};
	struct spdk_nvmf_cmd cmd = {{0}};
	struct spdk_nvme_rdma_req rdma_req = {0};
	struct nvme_request req = {{0}};
	int rc;

	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
	ctrlr.cdata.nvmf_specific.msdbd = 16;

	rqpair.mr_map = (struct spdk_rdma_utils_mem_map *)0xdeadbeef;
	rqpair.rdma_qp = (struct spdk_rdma_provider_qp *)0xdeadbeef;
	rqpair.qpair.ctrlr = &ctrlr;
	rqpair.cmds = &cmd;
	cmd.sgl[0].address = 0x1111;
	rdma_req.id = 0;
	rdma_req.req = &req;

	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
	req.qpair = &rqpair.qpair;

	/* Test case 1: single inline SGL. Expected: PASS */
	req.payload_offset = 0;
	req.payload_size = 0x1000;
	rc = nvme_rdma_build_contig_inline_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);

	/* Test case 2: SGL length exceeds 3 bytes. Expected: PASS */
	req.payload_offset = 0;
	req.payload_size = 1 << 24;
	rc = nvme_rdma_build_contig_inline_request(&rqpair, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
}

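/*
 * nvme_rdma_create_reqs(): allocating zero entries fails with -ENOMEM; otherwise
 * each request gets a send SGE pointing at its command slot and a pre-initialized
 * IBV_WR_SEND work request.
 */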
static void
test_nvme_rdma_create_reqs(void)
{
	struct nvme_rdma_qpair rqpair = {};
	int rc;

	memset(&g_nvme_hooks, 0, sizeof(g_nvme_hooks));

	/* Test case 1: zero entry. Expect: FAIL */
	rqpair.num_entries = 0;

	rc = nvme_rdma_create_reqs(&rqpair);
	CU_ASSERT(rqpair.rdma_reqs == NULL);
	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);

	/* Test case 2: single entry. Expect: PASS */
	memset(&rqpair, 0, sizeof(rqpair));
	rqpair.num_entries = 1;

	rc = nvme_rdma_create_reqs(&rqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rqpair.rdma_reqs[0].send_sgl[0].lkey == g_rdma_mr.lkey);
	CU_ASSERT(rqpair.rdma_reqs[0].send_sgl[0].addr == (uint64_t)&rqpair.cmds[0]);
	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.wr_id == (uint64_t)&rqpair.rdma_reqs[0].rdma_wr);
	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.next == NULL);
	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.opcode == IBV_WR_SEND);
	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.send_flags == IBV_SEND_SIGNALED);
	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.sg_list == rqpair.rdma_reqs[0].send_sgl);
	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.imm_data == 0);
	spdk_free(rqpair.rdma_reqs);
	spdk_free(rqpair.cmds);

	/* Test case 3: multiple entries. Expect: PASS */
	memset(&rqpair, 0, sizeof(rqpair));
	rqpair.num_entries = 5;

	rc = nvme_rdma_create_reqs(&rqpair);
	CU_ASSERT(rc == 0);
	for (int i = 0; i < 5; i++) {
		CU_ASSERT(rqpair.rdma_reqs[i].send_sgl[0].lkey == g_rdma_mr.lkey);
		CU_ASSERT(rqpair.rdma_reqs[i].send_sgl[0].addr == (uint64_t)&rqpair.cmds[i]);
		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.wr_id == (uint64_t)&rqpair.rdma_reqs[i].rdma_wr);
		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.next == NULL);
		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.opcode == IBV_WR_SEND);
		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.send_flags == IBV_SEND_SIGNALED);
		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.sg_list == rqpair.rdma_reqs[i].send_sgl);
		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.imm_data == 0);
	}
	spdk_free(rqpair.rdma_reqs);
	spdk_free(rqpair.cmds);
}

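/*
 * nvme_rdma_create_rsps(): zero entries fails; with one entry the response SGL,
 * receive work request, and response buffer are allocated and wired together.
 */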
static void
test_nvme_rdma_create_rsps(void)
{
	struct nvme_rdma_rsp_opts opts = {};
	struct nvme_rdma_rsps *rsps;
	struct spdk_rdma_provider_qp *rdma_qp = (struct spdk_rdma_provider_qp *)0xfeedf00d;
	struct nvme_rdma_qpair rqpair = { .rdma_qp = rdma_qp, };

	memset(&g_nvme_hooks, 0, sizeof(g_nvme_hooks));

	opts.rqpair = &rqpair;

	/* Test case 1: zero entries, creation fails */
	opts.num_entries = 0;
	rsps = nvme_rdma_create_rsps(&opts);
	SPDK_CU_ASSERT_FATAL(rsps == NULL);

	/* Test case 2: single entry, creation succeeds */
	opts.num_entries = 1;

	rsps = nvme_rdma_create_rsps(&opts);
	SPDK_CU_ASSERT_FATAL(rsps != NULL);
	CU_ASSERT(rsps->rsp_sgls != NULL);
	CU_ASSERT(rsps->rsp_recv_wrs != NULL);
	CU_ASSERT(rsps->rsps != NULL);
	CU_ASSERT(rsps->rsp_sgls[0].lkey == g_rdma_mr.lkey);
	CU_ASSERT(rsps->rsp_sgls[0].addr == (uint64_t)&rsps->rsps[0]);
	CU_ASSERT(rsps->rsp_recv_wrs[0].wr_id == (uint64_t)&rsps->rsps[0].rdma_wr);

	nvme_rdma_free_rsps(rsps);
}

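/*
 * nvme_rdma_ctrlr_create_qpair(): num_entries is set to qsize - 1, and queue sizes
 * of 0 and 1 are rejected.
 */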
static void
test_nvme_rdma_ctrlr_create_qpair(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};
	uint16_t qid, qsize;
	struct spdk_nvme_qpair *qpair;
	struct nvme_rdma_qpair *rqpair;

	/* Test case 1: max qsize. Expect: PASS */
	qsize = 0xffff;
	qid = 1;

	qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
					     SPDK_NVME_QPRIO_URGENT, 1,
					     false, false);
	CU_ASSERT(qpair != NULL);
	rqpair = SPDK_CONTAINEROF(qpair, struct nvme_rdma_qpair, qpair);
	CU_ASSERT(qpair == &rqpair->qpair);
	CU_ASSERT(rqpair->num_entries == qsize - 1);
	CU_ASSERT(rqpair->delay_cmd_submit == false);

	spdk_free(rqpair);
	rqpair = NULL;

	/* Test case 2: queue size 2. Expect: PASS */
	qsize = 2;
	qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
					     SPDK_NVME_QPRIO_URGENT, 1,
					     false, false);
	CU_ASSERT(qpair != NULL);
	rqpair = SPDK_CONTAINEROF(qpair, struct nvme_rdma_qpair, qpair);
	CU_ASSERT(rqpair->num_entries == qsize - 1);

	spdk_free(rqpair);
	rqpair = NULL;

	/* Test case 3: queue size zero. Expect: FAIL */
	qsize = 0;

	qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
					     SPDK_NVME_QPRIO_URGENT, 1,
					     false, false);
	SPDK_CU_ASSERT_FATAL(qpair == NULL);

	/* Test case 4: queue size 1. Expect: FAIL */
	qsize = 1;
	qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
					     SPDK_NVME_QPRIO_URGENT, 1,
					     false, false);
	SPDK_CU_ASSERT_FATAL(qpair == NULL);
}

DEFINE_STUB(ibv_create_cq, struct ibv_cq *, (struct ibv_context *context, int cqe, void *cq_context,
		struct ibv_comp_channel *channel, int comp_vector), (struct ibv_cq *)0xFEEDBEEF);
DEFINE_STUB(ibv_destroy_cq, int, (struct ibv_cq *cq), 0);

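/*
 * Poller creation and reuse: nvme_rdma_poll_group_get_poller() creates one poller
 * per ibv_context, returns the existing poller (with its refcnt bumped) for a known
 * device, and nvme_rdma_poll_group_put_poller() frees it on the last put.
 */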
static void
test_nvme_rdma_poller_create(void)
{
	struct nvme_rdma_poll_group group = {};
	struct ibv_context context = {
		.device = (struct ibv_device *)0xDEADBEEF
	};
	struct ibv_context context_2 = {
		.device = (struct ibv_device *)0xBAADBEEF
	};
	struct nvme_rdma_poller *poller_1, *poller_2, *poller_3;

	/* Case: no calloc or ibv failures are injected, so poller creation succeeds */
	STAILQ_INIT(&group.pollers);

	poller_1 = nvme_rdma_poll_group_get_poller(&group, &context);
	SPDK_CU_ASSERT_FATAL(poller_1 != NULL);
	CU_ASSERT(group.num_pollers == 1);
	CU_ASSERT(STAILQ_FIRST(&group.pollers) == poller_1);
	CU_ASSERT(poller_1->refcnt == 1);
	CU_ASSERT(poller_1->device == &context);
	CU_ASSERT(poller_1->cq == (struct ibv_cq *)0xFEEDBEEF);
	CU_ASSERT(poller_1->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
	CU_ASSERT(poller_1->required_num_wc == 0);

	poller_2 = nvme_rdma_poll_group_get_poller(&group, &context_2);
	SPDK_CU_ASSERT_FATAL(poller_2 != NULL);
	CU_ASSERT(group.num_pollers == 2);
	CU_ASSERT(STAILQ_FIRST(&group.pollers) == poller_2);
	CU_ASSERT(poller_2->refcnt == 1);
	CU_ASSERT(poller_2->device == &context_2);

	poller_3 = nvme_rdma_poll_group_get_poller(&group, &context);
	SPDK_CU_ASSERT_FATAL(poller_3 != NULL);
	CU_ASSERT(poller_3 == poller_1);
	CU_ASSERT(group.num_pollers == 2);
	CU_ASSERT(poller_3->refcnt == 2);

	nvme_rdma_poll_group_put_poller(&group, poller_2);
	CU_ASSERT(group.num_pollers == 1);

	nvme_rdma_poll_group_put_poller(&group, poller_1);
	CU_ASSERT(group.num_pollers == 1);
	CU_ASSERT(poller_3->refcnt == 1);

	nvme_rdma_poll_group_put_poller(&group, poller_3);
	CU_ASSERT(STAILQ_EMPTY(&group.pollers));
	CU_ASSERT(group.num_pollers == 0);

	nvme_rdma_poll_group_free_pollers(&group);
}

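/*
 * nvme_rdma_qpair_process_cm_event(): walks through the RDMA CM event types;
 * DISCONNECTED, DEVICE_REMOVAL and ADDR_CHANGE set the expected transport failure
 * reason, a CONNECT_RESPONSE without private data fails, and unknown events are
 * tolerated.
 */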
static void
test_nvme_rdma_qpair_process_cm_event(void)
{
	struct nvme_rdma_qpair rqpair = {};
	struct rdma_cm_event event = {};
	struct spdk_nvmf_rdma_accept_private_data accept_data = {};
	int rc = 0;

	/* case1: event == RDMA_CM_EVENT_ADDR_RESOLVED */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);

	/* case2: event == RDMA_CM_EVENT_CONNECT_REQUEST */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);

	/* case3: event == RDMA_CM_EVENT_CONNECT_ERROR */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_CONNECT_ERROR;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);

	/* case4: event == RDMA_CM_EVENT_UNREACHABLE */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_UNREACHABLE;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);

	/* case5: event == RDMA_CM_EVENT_CONNECT_RESPONSE */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
	event.param.conn.private_data = NULL;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == -1);

	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
	event.param.conn.private_data = &accept_data;
	accept_data.crqsize = 512;
	rqpair.num_entries = 1024;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rqpair.num_entries == 1024);

	/* case6: event == RDMA_CM_EVENT_DISCONNECTED */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_DISCONNECTED;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rqpair.qpair.transport_failure_reason == SPDK_NVME_QPAIR_FAILURE_REMOTE);

	/* case7: event == RDMA_CM_EVENT_DEVICE_REMOVAL */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rqpair.qpair.transport_failure_reason == SPDK_NVME_QPAIR_FAILURE_LOCAL);

	/* case8: event == RDMA_CM_EVENT_MULTICAST_JOIN */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);

	/* case9: event == RDMA_CM_EVENT_ADDR_CHANGE */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_ADDR_CHANGE;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rqpair.qpair.transport_failure_reason == SPDK_NVME_QPAIR_FAILURE_LOCAL);

	/* case10: event == RDMA_CM_EVENT_TIMEWAIT_EXIT */
	rqpair.evt = &event;
	event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);

	/* case11: default event == 0xFF */
	rqpair.evt = &event;
	event.event = 0xFF;
	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
	CU_ASSERT(rc == 0);
}

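/*
 * nvme_rdma_ctrlr_construct(): out-of-range retry count and ack timeout options are
 * clamped to their maximums, the trid is copied into the controller, and the admin
 * qpair is created with admin_queue_size - 1 entries.
 */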
static void
test_nvme_rdma_ctrlr_construct(void)
{
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr_opts opts = {};
	struct nvme_rdma_qpair *rqpair = NULL;
	struct nvme_rdma_ctrlr *rctrlr = NULL;
	struct rdma_event_channel cm_channel = {};
	void *devhandle = NULL;
	int rc;

	opts.transport_retry_count = NVME_RDMA_CTRLR_MAX_TRANSPORT_RETRY_COUNT + 1;
	opts.transport_ack_timeout = NVME_RDMA_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT + 1;
	opts.admin_queue_size = 0xFFFF;
	trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
	trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
	MOCK_SET(rdma_create_event_channel, &cm_channel);

	ctrlr = nvme_rdma_ctrlr_construct(&trid, &opts, devhandle);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
	CU_ASSERT(ctrlr->opts.transport_retry_count == NVME_RDMA_CTRLR_MAX_TRANSPORT_RETRY_COUNT);
	CU_ASSERT(ctrlr->opts.transport_ack_timeout == NVME_RDMA_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT);
	CU_ASSERT(ctrlr->opts.admin_queue_size == opts.admin_queue_size);
	rctrlr = SPDK_CONTAINEROF(ctrlr, struct nvme_rdma_ctrlr, ctrlr);
	CU_ASSERT(rctrlr->max_sge == NVME_RDMA_MAX_SGL_DESCRIPTORS);
	CU_ASSERT(rctrlr->cm_channel == &cm_channel);
	CU_ASSERT(!strncmp((char *)&rctrlr->ctrlr.trid, (char *)&trid, sizeof(trid)));

	SPDK_CU_ASSERT_FATAL(ctrlr->adminq != NULL);
	rqpair = SPDK_CONTAINEROF(ctrlr->adminq, struct nvme_rdma_qpair, qpair);
	CU_ASSERT(rqpair->num_entries == opts.admin_queue_size - 1);
	CU_ASSERT(rqpair->delay_cmd_submit == false);
	MOCK_CLEAR(rdma_create_event_channel);

	/* Hardcode the trtype, because nvme_qpair_init() is a stub function. */
	rqpair->qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
	rc = nvme_rdma_ctrlr_destruct(ctrlr);
	CU_ASSERT(rc == 0);
}

static void
test_nvme_rdma_req_put_and_get(void)
{
	struct nvme_rdma_qpair rqpair = {};
	struct spdk_nvme_rdma_req rdma_req = {};
	struct spdk_nvme_rdma_req *rdma_req_get;

	/* case 1: nvme_rdma_req_put */
	TAILQ_INIT(&rqpair.free_reqs);
	rdma_req.completion_flags = 1;
	rdma_req.req = (struct nvme_request *)0xDEADBEFF;
	rdma_req.id = 10086;
	nvme_rdma_req_put(&rqpair, &rdma_req);

	CU_ASSERT(rqpair.free_reqs.tqh_first == &rdma_req);
	CU_ASSERT(rqpair.free_reqs.tqh_first->completion_flags == 0);
	CU_ASSERT(rqpair.free_reqs.tqh_first->req == NULL);
	CU_ASSERT(rqpair.free_reqs.tqh_first->id == 10086);
	CU_ASSERT(rdma_req.completion_flags == 0);
	CU_ASSERT(rdma_req.req == NULL);

	/* case 2: nvme_rdma_req_get */
	rdma_req_get = nvme_rdma_req_get(&rqpair);
	CU_ASSERT(rdma_req_get == &rdma_req);
	CU_ASSERT(rdma_req_get->id == 10086);
	CU_ASSERT(rqpair.free_reqs.tqh_first == NULL);
}

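/*
 * nvme_rdma_req_init(): a zero-length payload yields an empty keyed descriptor;
 * contig and SGL payloads are built as in-capsule (inline) data when icd is
 * supported (icdoff == 0 here) and as keyed remote descriptors otherwise.
 */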
static void
test_nvme_rdma_req_init(void)
{
	struct nvme_rdma_qpair rqpair = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvmf_cmd cmd = {};
	struct spdk_nvme_rdma_req rdma_req = {};
	struct nvme_request req = {};
	struct nvme_rdma_ut_bdev_io bio = { .iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS };
	int rc = 1;

	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
	ctrlr.cdata.nvmf_specific.msdbd = 16;

	rqpair.mr_map = (struct spdk_rdma_utils_mem_map *)0xdeadbeef;
	rqpair.rdma_qp = (struct spdk_rdma_provider_qp *)0xdeadbeef;
	rqpair.qpair.ctrlr = &ctrlr;
	rqpair.cmds = &cmd;
	cmd.sgl[0].address = 0x1111;
	rdma_req.id = 0;
	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;

	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
	/* case 1: req->payload_size == 0, expect: pass. */
	req.payload_size = 0;
	rqpair.qpair.ctrlr->ioccsz_bytes = 1024;
	rqpair.qpair.ctrlr->icdoff = 0;
	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
	CU_ASSERT(rdma_req.send_wr.num_sge == 1);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);

	/* case 2: payload_type == NVME_PAYLOAD_TYPE_CONTIG, expect: pass. */
	/* icd_supported is true */
	rdma_req.req = NULL;
	rqpair.qpair.ctrlr->icdoff = 0;
	req.payload_offset = 0;
	req.payload_size = 1024;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);

	/* icd_supported is false */
	rdma_req.req = NULL;
	rqpair.qpair.ctrlr->icdoff = 1;
	req.payload_offset = 0;
	req.payload_size = 1024;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)req.payload.contig_or_cb_arg);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));

	/* case 3: payload_type == NVME_PAYLOAD_TYPE_SGL, expect: pass. */
	/* icd_supported is true */
	rdma_req.req = NULL;
	rqpair.qpair.ctrlr->icdoff = 0;
	req.payload = NVME_PAYLOAD_SGL(nvme_rdma_ut_reset_sgl, nvme_rdma_ut_next_sge, &bio, NULL);
	req.qpair = &rqpair.qpair;
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 1024;
	bio.iovs[0].iov_base = (void *)0xdeadbeef;
	bio.iovs[0].iov_len = 1024;
	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bio.iovpos == 1);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);

	/* icd_supported is false */
	rdma_req.req = NULL;
	rqpair.qpair.ctrlr->icdoff = 1;
	req.payload = NVME_PAYLOAD_SGL(nvme_rdma_ut_reset_sgl, nvme_rdma_ut_next_sge, &bio, NULL);
	req.qpair = &rqpair.qpair;
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 1024;
	bio.iovs[0].iov_base = (void *)0xdeadbeef;
	bio.iovs[0].iov_len = 1024;
	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bio.iovpos == 1);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)bio.iovs[0].iov_base);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
}

static void
test_nvme_rdma_validate_cm_event(void)
{
	enum rdma_cm_event_type expected_evt_type;
	struct rdma_cm_event reaped_evt = {};
	int rc;

	/* case 1: expected_evt_type == reaped_evt->event, expect: pass */
	expected_evt_type = RDMA_CM_EVENT_ADDR_RESOLVED;
	reaped_evt.event = RDMA_CM_EVENT_ADDR_RESOLVED;

	rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
	CU_ASSERT(rc == 0);

	/* case 2: expected_evt_type != RDMA_CM_EVENT_ESTABLISHED and is not equal to reaped_evt->event, expect: fail */
	reaped_evt.event = RDMA_CM_EVENT_CONNECT_RESPONSE;

	rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
	CU_ASSERT(rc == -EBADMSG);

	/* case 3: expected_evt_type == RDMA_CM_EVENT_ESTABLISHED */
	expected_evt_type = RDMA_CM_EVENT_ESTABLISHED;
	/* reaped_evt->event == RDMA_CM_EVENT_REJECTED and reaped_evt->status == 10, expect: fail */
	reaped_evt.event = RDMA_CM_EVENT_REJECTED;
	reaped_evt.status = 10;

	rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
	CU_ASSERT(rc == -ESTALE);

	/* reaped_evt->event == RDMA_CM_EVENT_CONNECT_RESPONSE, expect: pass */
	reaped_evt.event = RDMA_CM_EVENT_CONNECT_RESPONSE;

	rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
	CU_ASSERT(rc == 0);
}

static void
test_nvme_rdma_qpair_init(void)
{
	struct nvme_rdma_qpair rqpair = {};
	struct rdma_cm_id cm_id = {};
	struct ibv_pd *pd = (struct ibv_pd *)0xfeedbeef;
	struct spdk_memory_domain *domain = (struct spdk_memory_domain *)0xf00dfeed;
	struct ibv_qp qp = { .pd = pd };
	struct nvme_rdma_ctrlr rctrlr = {};
	int rc = 0;

	rctrlr.ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
	rqpair.cm_id = &cm_id;
	g_nvme_hooks.get_ibv_pd = NULL;
	rqpair.qpair.poll_group = NULL;
	rqpair.qpair.ctrlr = &rctrlr.ctrlr;
	g_spdk_rdma_qp.qp = &qp;
	MOCK_SET(spdk_rdma_utils_get_pd, pd);
	MOCK_SET(spdk_rdma_utils_get_memory_domain, domain);

	rc = nvme_rdma_qpair_init(&rqpair);
	CU_ASSERT(rc == 0);

	CU_ASSERT(rqpair.cm_id->context == &rqpair.qpair);
	CU_ASSERT(rqpair.max_send_sge == NVME_RDMA_DEFAULT_TX_SGE);
	CU_ASSERT(rqpair.max_recv_sge == NVME_RDMA_DEFAULT_RX_SGE);
	CU_ASSERT(rqpair.current_num_sends == 0);
	CU_ASSERT(rqpair.cq == (struct ibv_cq *)0xFEEDBEEF);
	CU_ASSERT(rqpair.memory_domain == domain);

	MOCK_CLEAR(spdk_rdma_utils_get_pd);
	MOCK_CLEAR(spdk_rdma_utils_get_memory_domain);
}

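/*
 * nvme_rdma_qpair_submit_request(): a request taken from the free list is posted
 * (send_wr.next is reset to NULL and current_num_sends is bumped); when no rdma_req
 * is available the call returns -EAGAIN and the poller's queued_requests counter
 * is incremented.
 */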
static void
test_nvme_rdma_qpair_submit_request(void)
{
	int rc;
	struct nvme_rdma_qpair rqpair = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_request req = {};
	struct nvme_rdma_poller poller = {};
	struct spdk_nvme_rdma_req *rdma_req = NULL;

	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
	req.payload_size = 0;
	rqpair.mr_map = (struct spdk_rdma_utils_mem_map *)0xdeadbeef;
	rqpair.rdma_qp = (struct spdk_rdma_provider_qp *)0xdeadbeef;
	rqpair.qpair.ctrlr = &ctrlr;
	rqpair.num_entries = 1;
	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
	rqpair.poller = &poller;

	rc = nvme_rdma_create_reqs(&rqpair);
	CU_ASSERT(rc == 0);
	/* Give send_wr.next a non-null value */
	rdma_req = TAILQ_FIRST(&rqpair.free_reqs);
	SPDK_CU_ASSERT_FATAL(rdma_req != NULL);
	rdma_req->send_wr.next = (void *)0xdeadbeef;

	rc = nvme_rdma_qpair_submit_request(&rqpair.qpair, &req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rqpair.current_num_sends == 1);
	CU_ASSERT(rdma_req->send_wr.next == NULL);
	TAILQ_REMOVE(&rqpair.outstanding_reqs, rdma_req, link);
	CU_ASSERT(TAILQ_EMPTY(&rqpair.outstanding_reqs));

	/* No request available */
	rc = nvme_rdma_qpair_submit_request(&rqpair.qpair, &req);
	CU_ASSERT(rc == -EAGAIN);
	CU_ASSERT(rqpair.poller->stats.queued_requests == 1);

	nvme_rdma_free_reqs(&rqpair);
}

static void
test_rdma_ctrlr_get_memory_domains(void)
{
	struct nvme_rdma_ctrlr rctrlr = {};
	struct nvme_rdma_qpair rqpair = {};
	struct spdk_memory_domain *domain = (struct spdk_memory_domain *)0xbaadbeef;
	struct spdk_memory_domain *domains[1] = {NULL};

	rqpair.memory_domain = domain;
	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
	rctrlr.ctrlr.adminq = &rqpair.qpair;

	/* Test 1, input domains pointer is NULL */
	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, NULL, 1) == 1);

	/* Test 2, input array_size is 0 */
	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, domains, 0) == 1);
	CU_ASSERT(domains[0] == NULL);

	/* Test 3, both input domains pointer and array_size are NULL/0 */
	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, NULL, 0) == 1);

	/* Test 4, input parameters are valid */
	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, domains, 1) == 1);
	CU_ASSERT(domains[0] == domain);
}

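/*
 * nvme_rdma_get_memory_translation(): with extended IO opts the memory domain
 * translation callback supplies the address, length, lkey and rkey (and its failure
 * propagates); without opts the translation falls back to
 * spdk_rdma_utils_get_translation and the mocked RDMA_UT_LKEY/RDMA_UT_RKEY.
 */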
static void
test_rdma_get_memory_translation(void)
{
	struct ibv_qp qp = {.pd = (struct ibv_pd *) 0xfeedbeef};
	struct spdk_rdma_provider_qp rdma_qp = {.qp = &qp};
	struct nvme_rdma_qpair rqpair = {.rdma_qp = &rdma_qp};
	struct spdk_nvme_ns_cmd_ext_io_opts io_opts = {
		.memory_domain = (struct spdk_memory_domain *) 0xdeaddead
	};
	struct nvme_request req = {.payload = {.opts = &io_opts}};
	struct nvme_rdma_memory_translation_ctx ctx = {
		.addr = (void *) 0xBAADF00D,
		.length = 0x100
	};
	int rc;

	rqpair.memory_domain = (struct spdk_memory_domain *) 0xfeedbeef;

	/* case 1, using extended IO opts with DMA device.
	 * Test 1 - spdk_memory_domain_translate_data error, expect fail */
	MOCK_SET(spdk_memory_domain_translate_data, -1);
	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
	CU_ASSERT(rc != 0);
	MOCK_CLEAR(spdk_memory_domain_translate_data);

	/* Test 2 - expect pass */
	g_memory_translation_translation.iov_count = 1;
	g_memory_translation_translation.iov.iov_base = ctx.addr + 1;
	g_memory_translation_translation.iov.iov_len = ctx.length;
	g_memory_translation_translation.rdma.lkey = 123;
	g_memory_translation_translation.rdma.rkey = 321;

	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ctx.lkey == g_memory_translation_translation.rdma.lkey);
	CU_ASSERT(ctx.rkey == g_memory_translation_translation.rdma.rkey);
	CU_ASSERT(ctx.addr == g_memory_translation_translation.iov.iov_base);
	CU_ASSERT(ctx.length == g_memory_translation_translation.iov.iov_len);

	/* case 2, using rdma translation
	 * Test 1 - spdk_rdma_utils_get_translation error, expect fail */
	req.payload.opts = NULL;
	MOCK_SET(spdk_rdma_utils_get_translation, -1);
	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
	CU_ASSERT(rc != 0);
	MOCK_CLEAR(spdk_rdma_utils_get_translation);

	/* Test 2 - expect pass */
	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ctx.lkey == RDMA_UT_LKEY);
	CU_ASSERT(ctx.rkey == RDMA_UT_RKEY);
}

static void
test_get_rdma_qpair_from_wc(void)
{
	const uint32_t test_qp_num = 123;
	struct nvme_rdma_poll_group group = {};
	struct nvme_rdma_qpair rqpair = {};
	struct spdk_rdma_provider_qp rdma_qp = {};
	struct ibv_qp qp = { .qp_num = test_qp_num };
	struct ibv_wc wc = { .qp_num = test_qp_num };

	STAILQ_INIT(&group.group.disconnected_qpairs);
	STAILQ_INIT(&group.group.connected_qpairs);
	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;

	/* Test 1 - Simulate case when nvme_rdma_qpair is disconnected but still in one of lists.
	 * get_rdma_qpair_from_wc must return NULL */
	STAILQ_INSERT_HEAD(&group.group.disconnected_qpairs, &rqpair.qpair, poll_group_stailq);
	CU_ASSERT(get_rdma_qpair_from_wc(&group, &wc) == NULL);
	STAILQ_REMOVE_HEAD(&group.group.disconnected_qpairs, poll_group_stailq);

	STAILQ_INSERT_HEAD(&group.group.connected_qpairs, &rqpair.qpair, poll_group_stailq);
	CU_ASSERT(get_rdma_qpair_from_wc(&group, &wc) == NULL);
	STAILQ_REMOVE_HEAD(&group.group.connected_qpairs, poll_group_stailq);

	/* Test 2 - nvme_rdma_qpair with valid rdma_qp/ibv_qp and qp_num */
	rdma_qp.qp = &qp;
	rqpair.rdma_qp = &rdma_qp;

	STAILQ_INSERT_HEAD(&group.group.disconnected_qpairs, &rqpair.qpair, poll_group_stailq);
	CU_ASSERT(get_rdma_qpair_from_wc(&group, &wc) == &rqpair);
	STAILQ_REMOVE_HEAD(&group.group.disconnected_qpairs, poll_group_stailq);

	STAILQ_INSERT_HEAD(&group.group.connected_qpairs, &rqpair.qpair, poll_group_stailq);
	CU_ASSERT(get_rdma_qpair_from_wc(&group, &wc) == &rqpair);
	STAILQ_REMOVE_HEAD(&group.group.connected_qpairs, poll_group_stailq);
}

static void
test_nvme_rdma_ctrlr_get_max_sges(void)
{
	struct nvme_rdma_ctrlr rctrlr = {};

	rctrlr.ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
	rctrlr.max_sge = NVME_RDMA_MAX_SGL_DESCRIPTORS;
	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 16;
	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4096;
	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 16);

	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 32;
	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4096;
	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 16);

	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 8;
	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4096;
	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 8);

	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 16;
	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4;
	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 1);

	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 16;
	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 6;
	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 2);
}

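/*
 * nvme_rdma_poll_group_get_stats(): NULL group or stats pointers are rejected with
 * -EINVAL; on success the returned transport stats contain one device entry per
 * poller, carrying that poller's poll, completion and work-request counters.
 */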
static void
test_nvme_rdma_poll_group_get_stats(void)
{
	int rc = -1;
	struct spdk_nvme_transport_poll_group_stat *tpointer = NULL;
	struct nvme_rdma_poll_group tgroup = {};
	struct ibv_device dev1, dev2 = {};
	struct ibv_context contexts1, contexts2 = {};
	struct nvme_rdma_poller *tpoller1 = NULL;
	struct nvme_rdma_poller *tpoller2 = NULL;

	memcpy(dev1.name, "/dev/test1", sizeof("/dev/test1"));
	memcpy(dev2.name, "/dev/test2", sizeof("/dev/test2"));
	contexts1.device = &dev1;
	contexts2.device = &dev2;

	/* Initialization */
	STAILQ_INIT(&tgroup.pollers);
	tpoller2 = nvme_rdma_poller_create(&tgroup, &contexts1);
	SPDK_CU_ASSERT_FATAL(tpoller2 != NULL);
	CU_ASSERT(tgroup.num_pollers == 1);

	tpoller1 = nvme_rdma_poller_create(&tgroup, &contexts2);
	SPDK_CU_ASSERT_FATAL(tpoller1 != NULL);
	CU_ASSERT(tgroup.num_pollers == 2);
	CU_ASSERT(&tgroup.pollers != NULL);

	CU_ASSERT(tpoller1->device == &contexts2);
	CU_ASSERT(tpoller2->device == &contexts1);
	CU_ASSERT(strcmp(tpoller1->device->device->name, "/dev/test2") == 0);
	CU_ASSERT(strcmp(tpoller2->device->device->name, "/dev/test1") == 0);
	CU_ASSERT(tpoller1->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
	CU_ASSERT(tpoller2->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
	CU_ASSERT(tpoller1->required_num_wc == 0);
	CU_ASSERT(tpoller2->required_num_wc == 0);

	/* Test1: Invalid group pointer */
	rc = nvme_rdma_poll_group_get_stats(NULL, &tpointer);
	CU_ASSERT(rc == -EINVAL);

	/* Test2: Invalid stats pointer */
	rc = nvme_rdma_poll_group_get_stats(&tgroup.group, NULL);
	CU_ASSERT(rc == -EINVAL);

	/* Test3: Success, member variables should be correct */
	tpoller1->stats.polls = 111;
	tpoller1->stats.idle_polls = 112;
	tpoller1->stats.completions = 113;
	tpoller1->stats.queued_requests = 114;
	tpoller1->stats.rdma_stats.send.num_submitted_wrs = 121;
	tpoller1->stats.rdma_stats.send.doorbell_updates = 122;
	tpoller1->stats.rdma_stats.recv.num_submitted_wrs = 131;
	tpoller1->stats.rdma_stats.recv.doorbell_updates = 132;
	tpoller2->stats.polls = 211;
	tpoller2->stats.idle_polls = 212;
	tpoller2->stats.completions = 213;
	tpoller2->stats.queued_requests = 214;
	tpoller2->stats.rdma_stats.send.num_submitted_wrs = 221;
	tpoller2->stats.rdma_stats.send.doorbell_updates = 222;
	tpoller2->stats.rdma_stats.recv.num_submitted_wrs = 231;
	tpoller2->stats.rdma_stats.recv.doorbell_updates = 232;

	rc = nvme_rdma_poll_group_get_stats(&tgroup.group, &tpointer);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tpointer != NULL);
	CU_ASSERT(tpointer->trtype == SPDK_NVME_TRANSPORT_RDMA);
	CU_ASSERT(tpointer->rdma.num_devices == tgroup.num_pollers);
	CU_ASSERT(tpointer->rdma.device_stats != NULL);

	CU_ASSERT(strcmp(tpointer->rdma.device_stats[0].name, "/dev/test2") == 0);
	CU_ASSERT(tpointer->rdma.device_stats[0].polls == 111);
	CU_ASSERT(tpointer->rdma.device_stats[0].idle_polls == 112);
	CU_ASSERT(tpointer->rdma.device_stats[0].completions == 113);
	CU_ASSERT(tpointer->rdma.device_stats[0].queued_requests == 114);
	CU_ASSERT(tpointer->rdma.device_stats[0].total_send_wrs == 121);
	CU_ASSERT(tpointer->rdma.device_stats[0].send_doorbell_updates == 122);
	CU_ASSERT(tpointer->rdma.device_stats[0].total_recv_wrs == 131);
	CU_ASSERT(tpointer->rdma.device_stats[0].recv_doorbell_updates == 132);

	CU_ASSERT(strcmp(tpointer->rdma.device_stats[1].name, "/dev/test1") == 0);
	CU_ASSERT(tpointer->rdma.device_stats[1].polls == 211);
	CU_ASSERT(tpointer->rdma.device_stats[1].idle_polls == 212);
	CU_ASSERT(tpointer->rdma.device_stats[1].completions == 213);
	CU_ASSERT(tpointer->rdma.device_stats[1].queued_requests == 214);
	CU_ASSERT(tpointer->rdma.device_stats[1].total_send_wrs == 221);
	CU_ASSERT(tpointer->rdma.device_stats[1].send_doorbell_updates == 222);
	CU_ASSERT(tpointer->rdma.device_stats[1].total_recv_wrs == 231);
	CU_ASSERT(tpointer->rdma.device_stats[1].recv_doorbell_updates == 232);

	nvme_rdma_poll_group_free_stats(&tgroup.group, tpointer);
	nvme_rdma_poll_group_free_pollers(&tgroup);
}

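/*
 * nvme_rdma_qpair_set_poller(): covers poll group creation, CQ creation failure, a
 * missing verbs context, reuse of a matching CQ when current_num_wc is large
 * enough, a failed ibv_resize_cq, and a successful resize that doubles
 * current_num_wc.
 */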
static void
test_nvme_rdma_qpair_set_poller(void)
{
	int rc = -1;
	struct nvme_rdma_poll_group *group;
	struct spdk_nvme_transport_poll_group *tgroup;
	struct nvme_rdma_poller *poller;
	struct nvme_rdma_qpair rqpair = {};
	struct rdma_cm_id cm_id = {};

	/* Case1: Test function nvme_rdma_poll_group_create */
	/* Test1: Function nvme_rdma_poll_group_create success */
	tgroup = nvme_rdma_poll_group_create();
	SPDK_CU_ASSERT_FATAL(tgroup != NULL);

	group = nvme_rdma_poll_group(tgroup);
	CU_ASSERT(group != NULL);
	CU_ASSERT(STAILQ_EMPTY(&group->pollers));

	/* Case2: Test function nvme_rdma_qpair_set_poller */
	rqpair.qpair.poll_group = tgroup;
	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
	rqpair.cm_id = &cm_id;

	/* Test1: Function ibv_create_cq failed */
	cm_id.verbs = (void *)0xFEEDBEEF;
	MOCK_SET(ibv_create_cq, NULL);

	rc = nvme_rdma_qpair_set_poller(&rqpair.qpair);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(rqpair.cq == NULL);
	CU_ASSERT(STAILQ_EMPTY(&group->pollers));

	MOCK_CLEAR(ibv_create_cq);

	/* Test2: Unable to find a cq for qpair on poll group */
	cm_id.verbs = NULL;

	rc = nvme_rdma_qpair_set_poller(&rqpair.qpair);
	CU_ASSERT(rc == -EINVAL);
	CU_ASSERT(rqpair.cq == NULL);
	CU_ASSERT(STAILQ_EMPTY(&group->pollers));

	/* Test3: Match cq success, current_num_wc is enough */
	MOCK_SET(ibv_create_cq, (struct ibv_cq *)0xFEEDBEEF);

	cm_id.verbs = (void *)0xFEEDBEEF;
	rqpair.num_entries = 0;

	rc = nvme_rdma_qpair_set_poller(&rqpair.qpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rqpair.cq == (void *)0xFEEDBEEF);

	poller = STAILQ_FIRST(&group->pollers);
	SPDK_CU_ASSERT_FATAL(poller != NULL);
	CU_ASSERT(STAILQ_NEXT(poller, link) == NULL);
	CU_ASSERT(poller->device == (struct ibv_context *)0xFEEDBEEF);
	CU_ASSERT(poller->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
	CU_ASSERT(poller->required_num_wc == 0);
	CU_ASSERT(rqpair.poller == poller);

	rqpair.qpair.poll_group_tailq_head = &tgroup->disconnected_qpairs;

	nvme_rdma_poll_group_put_poller(group, rqpair.poller);
	CU_ASSERT(STAILQ_EMPTY(&group->pollers));

	rqpair.qpair.poll_group_tailq_head = &tgroup->connected_qpairs;

	/* Test4: Match cq success, function ibv_resize_cq failed */
	rqpair.cq = NULL;
	rqpair.num_entries = DEFAULT_NVME_RDMA_CQ_SIZE - 1;
	MOCK_SET(ibv_resize_cq, -1);

	rc = nvme_rdma_qpair_set_poller(&rqpair.qpair);
	CU_ASSERT(rc == -EPROTO);
	CU_ASSERT(STAILQ_EMPTY(&group->pollers));

	/* Test5: current_num_wc is not enough, resize success */
	MOCK_SET(ibv_resize_cq, 0);

	rc = nvme_rdma_qpair_set_poller(&rqpair.qpair);
	CU_ASSERT(rc == 0);

	poller = STAILQ_FIRST(&group->pollers);
	SPDK_CU_ASSERT_FATAL(poller != NULL);
	CU_ASSERT(poller->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE * 2);
	CU_ASSERT(poller->required_num_wc == (DEFAULT_NVME_RDMA_CQ_SIZE - 1) * 2);
	CU_ASSERT(rqpair.cq == poller->cq);
	CU_ASSERT(rqpair.poller == poller);

	rqpair.qpair.poll_group_tailq_head = &tgroup->disconnected_qpairs;

	nvme_rdma_poll_group_put_poller(group, rqpair.poller);
	CU_ASSERT(STAILQ_EMPTY(&group->pollers));

	rc = nvme_rdma_poll_group_destroy(tgroup);
	CU_ASSERT(rc == 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvme_rdma", NULL, NULL);
	CU_ADD_TEST(suite, test_nvme_rdma_build_sgl_request);
	CU_ADD_TEST(suite, test_nvme_rdma_build_sgl_inline_request);
	CU_ADD_TEST(suite, test_nvme_rdma_build_contig_request);
	CU_ADD_TEST(suite, test_nvme_rdma_build_contig_inline_request);
	CU_ADD_TEST(suite, test_nvme_rdma_create_reqs);
	CU_ADD_TEST(suite, test_nvme_rdma_create_rsps);
	CU_ADD_TEST(suite, test_nvme_rdma_ctrlr_create_qpair);
	CU_ADD_TEST(suite, test_nvme_rdma_poller_create);
	CU_ADD_TEST(suite, test_nvme_rdma_qpair_process_cm_event);
	CU_ADD_TEST(suite, test_nvme_rdma_ctrlr_construct);
	CU_ADD_TEST(suite, test_nvme_rdma_req_put_and_get);
	CU_ADD_TEST(suite, test_nvme_rdma_req_init);
	CU_ADD_TEST(suite, test_nvme_rdma_validate_cm_event);
	CU_ADD_TEST(suite, test_nvme_rdma_qpair_init);
	CU_ADD_TEST(suite, test_nvme_rdma_qpair_submit_request);
	CU_ADD_TEST(suite, test_rdma_ctrlr_get_memory_domains);
	CU_ADD_TEST(suite, test_rdma_get_memory_translation);
	CU_ADD_TEST(suite, test_get_rdma_qpair_from_wc);
	CU_ADD_TEST(suite, test_nvme_rdma_ctrlr_get_max_sges);
	CU_ADD_TEST(suite, test_nvme_rdma_poll_group_get_stats);
	CU_ADD_TEST(suite, test_nvme_rdma_qpair_set_poller);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}