/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2021 Intel Corporation. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "common/lib/test_env.c"
#include "common/lib/test_iobuf.c"
#include "nvmf/transport.c"
#include "nvmf/rdma.c"
#include "common/lib/test_rdma.c"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

#define RDMA_UT_UNITS_IN_MAX_IO 16
#define SPDK_NVMF_DEFAULT_BUFFER_CACHE_SIZE 32

struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
	.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
	.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
	.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
	.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
	.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
	.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
	.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
	.opts_size = sizeof(g_rdma_ut_transport_opts)
};

DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
		struct spdk_dif_ctx *dif_ctx), false);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair), 0);
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(ut_transport_destroy, int, (struct spdk_nvmf_transport *transport,
					spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(ibv_get_device_name, const char *, (struct ibv_device *device), NULL);
DEFINE_STUB(ibv_query_qp, int, (struct ibv_qp *qp, struct ibv_qp_attr *attr,
				int attr_mask,
				struct ibv_qp_init_attr *init_attr), 0);
DEFINE_STUB(rdma_create_id, int, (struct rdma_event_channel *channel,
				  struct rdma_cm_id **id, void *context,
				  enum rdma_port_space ps), 0);
DEFINE_STUB(rdma_bind_addr, int, (struct rdma_cm_id *id, struct sockaddr *addr), 0);
DEFINE_STUB(rdma_listen, int, (struct rdma_cm_id *id, int backlog), 0);
DEFINE_STUB(rdma_destroy_id, int, (struct rdma_cm_id *id), 0);
DEFINE_STUB(ibv_dereg_mr, int, (struct ibv_mr *mr), 0);
DEFINE_STUB(rdma_reject, int, (struct rdma_cm_id *id,
			       const void *private_data, uint8_t private_data_len), 0);
DEFINE_STUB(ibv_resize_cq, int, (struct ibv_cq *cq, int cqe), 0);
DEFINE_STUB_V(rdma_destroy_qp, (struct rdma_cm_id *id));
DEFINE_STUB_V(rdma_destroy_event_channel, (struct rdma_event_channel *channel));
DEFINE_STUB(ibv_dealloc_pd, int, (struct ibv_pd *pd), 0);
DEFINE_STUB(rdma_create_event_channel, struct rdma_event_channel *, (void), NULL);
DEFINE_STUB(rdma_get_devices, struct ibv_context **, (int *num_devices), NULL);
DEFINE_STUB(ibv_query_device, int, (struct ibv_context *context,
				    struct ibv_device_attr *device_attr), 0);
DEFINE_STUB(ibv_alloc_pd, struct ibv_pd *, (struct ibv_context *context), NULL);
DEFINE_STUB_V(rdma_free_devices, (struct ibv_context **list));
DEFINE_STUB(ibv_get_async_event, int, (struct ibv_context *context, struct ibv_async_event *event),
	    0);
DEFINE_STUB(ibv_event_type_str, const char *, (enum ibv_event_type event_type), NULL);
DEFINE_STUB_V(ibv_ack_async_event, (struct ibv_async_event *event));
DEFINE_STUB(rdma_get_cm_event, int, (struct rdma_event_channel *channel,
				     struct rdma_cm_event **event), 0);
DEFINE_STUB(rdma_ack_cm_event, int, (struct rdma_cm_event *event), 0);
DEFINE_STUB(ibv_destroy_cq, int, (struct ibv_cq *cq), 0);
DEFINE_STUB(ibv_create_cq, struct ibv_cq *, (struct ibv_context *context, int cqe,
		void *cq_context,
		struct ibv_comp_channel *channel,
		int comp_vector), NULL);
DEFINE_STUB(ibv_wc_status_str, const char *, (enum ibv_wc_status status), NULL);
DEFINE_STUB(rdma_get_dst_port, __be16, (struct rdma_cm_id *id), 0);
DEFINE_STUB(rdma_get_src_port, __be16, (struct rdma_cm_id *id), 0);
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int, (struct spdk_nvmf_qpair *qpair,
		struct spdk_nvme_transport_id *trid), 0);
DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
DEFINE_STUB_V(ut_opts_init, (struct spdk_nvmf_transport_opts *opts));
DEFINE_STUB(ut_transport_listen, int, (struct spdk_nvmf_transport *transport,
				       const struct spdk_nvme_transport_id *trid, struct spdk_nvmf_listen_opts *opts), 0);
DEFINE_STUB_V(ut_transport_stop_listen, (struct spdk_nvmf_transport *transport,
		const struct spdk_nvme_transport_id *trid));
DEFINE_STUB(spdk_mempool_lookup, struct spdk_mempool *, (const char *name), NULL);

/* ibv_reg_mr can be a macro, need to undefine it */
#ifdef ibv_reg_mr
#undef ibv_reg_mr
#endif

DEFINE_RETURN_MOCK(ibv_reg_mr, struct ibv_mr *);
struct ibv_mr *
ibv_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
{
	HANDLE_RETURN_MOCK(ibv_reg_mr);
	if (length > 0) {
		return &g_rdma_mr;
	} else {
		return NULL;
	}
}

struct spdk_nvmf_transport ut_transport = {};

static int
ut_transport_create(struct spdk_nvmf_transport_opts *opts, spdk_nvmf_transport_create_done_cb cb_fn,
		    void *cb_arg)
{
	cb_fn(cb_arg, &ut_transport);
	return 0;
}

static void
test_nvmf_create_transport_done(void *cb_arg, struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_transport **ctx = cb_arg;

	*ctx = transport;
}

static void
test_spdk_nvmf_transport_create(void)
{
	int rc;
	struct spdk_nvmf_transport *transport = NULL;
	struct nvmf_transport_ops_list_element *ops_element;
	struct spdk_iobuf_opts opts_iobuf = {};
	struct spdk_nvmf_transport_ops ops = {
		.name = "new_ops",
		.type = (enum spdk_nvme_transport_type)SPDK_NVMF_TRTYPE_RDMA,
		.create_async = ut_transport_create,
		.destroy = ut_transport_destroy
	};

	opts_iobuf.large_bufsize = 0x10000;
	opts_iobuf.large_pool_count = 4096;
	opts_iobuf.small_bufsize = 0x1000;
	opts_iobuf.small_pool_count = 4096;

	rc = spdk_iobuf_set_opts(&opts_iobuf);
	CU_ASSERT(rc == 0);

	/* No available ops element */
	rc = spdk_nvmf_transport_create_async("new_ops", &g_rdma_ut_transport_opts,
					      test_nvmf_create_transport_done, &transport);
	CU_ASSERT(rc != 0);
	CU_ASSERT(transport == NULL);

	spdk_nvmf_transport_register(&ops);

	/* Ensure io_unit_size cannot be set to 0 */
	g_rdma_ut_transport_opts.io_unit_size = 0;
	rc = spdk_nvmf_transport_create_async("new_ops", &g_rdma_ut_transport_opts,
					      test_nvmf_create_transport_done, &transport);
	CU_ASSERT(rc != 0);
	CU_ASSERT(transport == NULL);

	/* Ensure io_unit_size cannot be larger than large_bufsize */
	g_rdma_ut_transport_opts.io_unit_size = opts_iobuf.large_bufsize * 2;
	rc = spdk_nvmf_transport_create_async("new_ops", &g_rdma_ut_transport_opts,
					      test_nvmf_create_transport_done, &transport);
	CU_ASSERT(rc != 0);
	CU_ASSERT(transport == NULL);

	g_rdma_ut_transport_opts.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE;

	/* Create transport successfully */
	rc = spdk_nvmf_transport_create_async("new_ops", &g_rdma_ut_transport_opts,
					      test_nvmf_create_transport_done, &transport);
	CU_ASSERT(rc == 0);
	CU_ASSERT(transport == &ut_transport);
	CU_ASSERT(!memcmp(&transport->opts, &g_rdma_ut_transport_opts, sizeof(g_rdma_ut_transport_opts)));
	CU_ASSERT(!memcmp(transport->ops, &ops, sizeof(ops)));

	rc = spdk_nvmf_transport_destroy(transport, NULL, NULL);
	CU_ASSERT(rc == 0);

	/* transport_opts parameter invalid */
	transport = NULL;
	g_rdma_ut_transport_opts.max_io_size = 4096;

	rc = spdk_nvmf_transport_create_async("new_ops", &g_rdma_ut_transport_opts,
					      test_nvmf_create_transport_done, &transport);
	CU_ASSERT(rc != 0);
	CU_ASSERT(transport == NULL);
	g_rdma_ut_transport_opts.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE *
						RDMA_UT_UNITS_IN_MAX_IO);

	ops_element = TAILQ_LAST(&g_spdk_nvmf_transport_ops, nvmf_transport_ops_list);
	TAILQ_REMOVE(&g_spdk_nvmf_transport_ops, ops_element, link);
	free(ops_element);
}

static struct spdk_nvmf_transport_poll_group *
ut_poll_group_create(struct spdk_nvmf_transport *transport,
		     struct spdk_nvmf_poll_group *group)
{
	struct spdk_nvmf_transport_poll_group *tgroup;

	tgroup = calloc(1, sizeof(*tgroup));
	SPDK_CU_ASSERT_FATAL(tgroup != NULL);
	return tgroup;
}

static void
ut_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
{
	free(group);
}

static void
test_nvmf_transport_poll_group_create(void)
{
	struct spdk_nvmf_transport_poll_group *poll_group = NULL;
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_transport_ops ops = {};

	ops.poll_group_create = ut_poll_group_create;
	ops.poll_group_destroy = ut_poll_group_destroy;
	transport.ops = &ops;
	transport.opts.buf_cache_size = SPDK_NVMF_DEFAULT_BUFFER_CACHE_SIZE;

	poll_group = nvmf_transport_poll_group_create(&transport, NULL);
	SPDK_CU_ASSERT_FATAL(poll_group != NULL);
	CU_ASSERT(poll_group->transport == &transport);

	nvmf_transport_poll_group_destroy(poll_group);

	poll_group = nvmf_transport_poll_group_create(&transport, NULL);
	SPDK_CU_ASSERT_FATAL(poll_group != NULL);
	CU_ASSERT(poll_group->transport == &transport);

	nvmf_transport_poll_group_destroy(poll_group);
}

static void
test_spdk_nvmf_transport_opts_init(void)
{
	int rc;
	bool rcbool;
	size_t opts_size;
	struct spdk_nvmf_transport *transport = NULL;
	struct spdk_nvmf_transport_opts opts = {};
	const struct spdk_nvmf_transport_ops *tops;
	struct spdk_nvmf_transport_ops ops = {
		.name = "ut_ops",
		.type = (enum spdk_nvme_transport_type)SPDK_NVMF_TRTYPE_RDMA,
		.create_async = ut_transport_create,
		.destroy = ut_transport_destroy,
		.opts_init = ut_opts_init
	};

	spdk_nvmf_transport_register(&ops);
	rc = spdk_nvmf_transport_create_async("ut_ops", &g_rdma_ut_transport_opts,
					      test_nvmf_create_transport_done, &transport);
	CU_ASSERT(rc == 0);
	CU_ASSERT(transport == &ut_transport);

	tops = nvmf_get_transport_ops(ops.name);
	CU_ASSERT(memcmp(tops, &ops, sizeof(struct spdk_nvmf_transport_ops)) == 0);

	/* Test1: Invalid parameter: unavailable transport type */
	opts_size = sizeof(struct spdk_nvmf_transport_opts);

	rcbool = spdk_nvmf_transport_opts_init("invalid_ops", &opts, opts_size);
	CU_ASSERT(rcbool == false);

	/* Test2: Invalid parameter: NULL opts pointer */
	rcbool = true;

	rcbool = spdk_nvmf_transport_opts_init(ops.name, NULL, opts_size);
	CU_ASSERT(rcbool == false);

	/* Test3: Invalid parameter: opts_size of zero */
	rcbool = true;
	opts_size = 0;

	rcbool = spdk_nvmf_transport_opts_init(ops.name, &opts, opts_size);
	CU_ASSERT(rcbool == false);

	/* Test4: success */
	opts.opts_size = 0;
	opts_size = sizeof(struct spdk_nvmf_transport_opts);

	rcbool = spdk_nvmf_transport_opts_init(ops.name, &opts, opts_size);
	CU_ASSERT(rcbool == true);
	CU_ASSERT(opts.opts_size == opts_size);

	rc = spdk_nvmf_transport_destroy(transport, NULL, NULL);
	CU_ASSERT(rc == 0);
}

static void
test_spdk_nvmf_transport_listen_ext(void)
{
	int rc;
	struct spdk_nvmf_transport *transport = NULL;
	struct spdk_nvme_transport_id trid1 = {};
	struct spdk_nvme_transport_id trid2 = {};
	struct spdk_nvmf_listen_opts lopts = {};
	struct spdk_nvmf_listener *tlistener;
	struct spdk_nvmf_transport_ops ops = {
		.name = "ut_ops1",
		.type = (enum spdk_nvme_transport_type)SPDK_NVMF_TRTYPE_RDMA,
		.create_async = ut_transport_create,
		.destroy = ut_transport_destroy,
		.opts_init = ut_opts_init,
		.listen = ut_transport_listen,
		.stop_listen = ut_transport_stop_listen
	};

	trid1.trtype = (enum spdk_nvme_transport_type)SPDK_NVMF_TRTYPE_RDMA;
	trid1.adrfam = (enum spdk_nvmf_adrfam)SPDK_NVMF_ADRFAM_IPV4;
	trid1.priority = 4;
	memcpy(trid1.traddr, "192.168.100.72", sizeof("192.168.100.72"));
	memcpy(trid1.trsvcid, "4420", sizeof("4420"));

	spdk_nvmf_transport_register(&ops);
	rc = spdk_nvmf_transport_create_async("ut_ops1", &g_rdma_ut_transport_opts,
					      test_nvmf_create_transport_done, &transport);
	CU_ASSERT(rc == 0);
	CU_ASSERT(transport == &ut_transport);

	/* Test1: Executing listen fails */
	MOCK_SET(ut_transport_listen, -1);

	rc = spdk_nvmf_transport_listen(transport, &trid1, &lopts);
	tlistener = nvmf_transport_find_listener(transport, &trid1);
	CU_ASSERT(rc == -1);
	CU_ASSERT(tlistener == NULL);

	/* Test2: Executing listen succeeds */
	MOCK_SET(ut_transport_listen, 0);

	rc = spdk_nvmf_transport_listen(transport, &trid1, &lopts);
	tlistener = nvmf_transport_find_listener(transport, &trid1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tlistener != NULL);
	CU_ASSERT(tlistener->ref == 1);
	CU_ASSERT(memcmp(&tlistener->trid, &trid1, sizeof(trid1)) == 0);

	/* Test3: Listen on the same identifier repeatedly */
	tlistener = NULL;

	rc = spdk_nvmf_transport_listen(transport, &trid1, &lopts);
	tlistener = nvmf_transport_find_listener(transport, &trid1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tlistener != NULL);
	CU_ASSERT(tlistener->ref == 2);
	CU_ASSERT(memcmp(&tlistener->trid, &trid1, sizeof(trid1)) == 0);

	/* Test4: Stop listen when ref > 1, the listener will not be released */
	tlistener = NULL;

	rc = spdk_nvmf_transport_stop_listen(transport, &trid1);
	tlistener = nvmf_transport_find_listener(transport, &trid1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tlistener != NULL);
	CU_ASSERT(tlistener->ref == 1);
	CU_ASSERT(memcmp(&tlistener->trid, &trid1, sizeof(trid1)) == 0);

	/* Test5: Stop listen when ref == 1, the listener will be released */

	rc = spdk_nvmf_transport_stop_listen(transport, &trid1);
	tlistener = nvmf_transport_find_listener(transport, &trid1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tlistener == NULL);

	/* Test6: Release an unrecognized listener */
	rc = spdk_nvmf_transport_stop_listen(transport, &trid2);

	CU_ASSERT(rc == -ENOENT);

	rc = spdk_nvmf_transport_destroy(transport, NULL, NULL);
	CU_ASSERT(rc == 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_spdk_nvmf_transport_create);
	CU_ADD_TEST(suite, test_nvmf_transport_poll_group_create);
	CU_ADD_TEST(suite, test_spdk_nvmf_transport_opts_init);
	CU_ADD_TEST(suite, test_spdk_nvmf_transport_listen_ext);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}