/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation. All rights reserved.
 * Copyright (c) 2019, 2021 Mellanox Technologies LTD. All rights reserved.
 * Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "common/lib/test_env.c"
#include "common/lib/test_iobuf.c"
#include "common/lib/test_rdma.c"
#include "nvmf/rdma.c"
#include "nvmf/transport.c"

#define RDMA_UT_UNITS_IN_MAX_IO 16

struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
        .max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
        .max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
        .in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
        .max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
        .io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
        .max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
        .num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
};

SPDK_LOG_REGISTER_COMPONENT(nvmf)
DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
                uint64_t size, uint64_t translation), 0);
DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
                uint64_t size), 0);
DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
                const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
                nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int,
            (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid), 0);
DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));

DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
                const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(spdk_nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
                struct spdk_dif_ctx *dif_ctx), false);
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
                enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
DEFINE_STUB(ibv_dereg_mr, int, (struct ibv_mr *mr), 0);
DEFINE_STUB(ibv_resize_cq, int, (struct ibv_cq *cq, int cqe), 0);
DEFINE_STUB(spdk_mempool_lookup, struct spdk_mempool *, (const char *name), NULL);

/* ibv_reg_mr can be a macro, need to undefine it */
#ifdef ibv_reg_mr
#undef ibv_reg_mr
#endif

DEFINE_RETURN_MOCK(ibv_reg_mr, struct ibv_mr *);
struct ibv_mr *
ibv_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
{
        HANDLE_RETURN_MOCK(ibv_reg_mr);
        if (length > 0) {
                return &g_rdma_mr;
        } else {
                return NULL;
        }
}

int
ibv_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
             int attr_mask, struct ibv_qp_init_attr *init_attr)
{
        if (qp == NULL) {
                return -1;
        } else {
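                /* This stub reports an arbitrary port number and maps QPs in the
                 * error state to an out-of-range qp_state value (10), so that
                 * callers can exercise their bad-state handling. */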
                attr->port_num = 80;

                if (qp->state == IBV_QPS_ERR) {
                        attr->qp_state = 10;
                } else {
                        attr->qp_state = IBV_QPS_INIT;
                }

                return 0;
        }
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
        switch (trtype) {
        case SPDK_NVME_TRANSPORT_PCIE:
                return "PCIe";
        case SPDK_NVME_TRANSPORT_RDMA:
                return "RDMA";
        case SPDK_NVME_TRANSPORT_FC:
                return "FC";
        default:
                return NULL;
        }
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
        int len, i;

        if (trstring == NULL) {
                return -EINVAL;
        }

        len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
        if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
                return -EINVAL;
        }

        /* Set the official trstring to the uppercase version of the input. */
        for (i = 0; i < len; i++) {
                trid->trstring[i] = toupper(trstring[i]);
        }
        return 0;
}

static void
reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
{
        int i;

        rdma_req->req.length = 0;
        rdma_req->req.data_from_pool = false;
        rdma_req->data.wr.num_sge = 0;
        rdma_req->data.wr.wr.rdma.remote_addr = 0;
        rdma_req->data.wr.wr.rdma.rkey = 0;
        rdma_req->offset = 0;
        memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));

        for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
                rdma_req->req.iov[i].iov_base = 0;
                rdma_req->req.iov[i].iov_len = 0;
                rdma_req->data.wr.sg_list[i].addr = 0;
                rdma_req->data.wr.sg_list[i].length = 0;
                rdma_req->data.wr.sg_list[i].lkey = 0;
        }
        rdma_req->req.iovcnt = 0;
        if (rdma_req->req.stripped_data) {
                free(rdma_req->req.stripped_data);
                rdma_req->req.stripped_data = NULL;
        }
}

static void
test_spdk_nvmf_rdma_request_parse_sgl(void)
{
        struct spdk_nvmf_rdma_transport rtransport;
        struct spdk_nvmf_rdma_device device;
        struct spdk_nvmf_rdma_request rdma_req = {};
        struct spdk_nvmf_rdma_recv recv;
        struct spdk_nvmf_rdma_poll_group group;
        struct spdk_nvmf_rdma_qpair rqpair;
        struct spdk_nvmf_rdma_poller poller;
        union nvmf_c2h_msg cpl;
        union nvmf_h2c_msg cmd;
        struct spdk_nvme_sgl_descriptor *sgl;
        struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
        struct spdk_nvmf_rdma_request_data data;
        int rc, i;
        uint32_t sgl_length;

        data.wr.sg_list = data.sgl;
        group.group.transport = &rtransport.transport;
        poller.group = &group;
        rqpair.poller = &poller;
        rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

        sgl = &cmd.nvme_cmd.dptr.sgl1;
        rdma_req.recv = &recv;
        rdma_req.req.cmd = &cmd;
        rdma_req.req.rsp = &cpl;
        rdma_req.data.wr.sg_list = rdma_req.data.sgl;
        rdma_req.req.qpair = &rqpair.qpair;
        rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

        rtransport.transport.opts = g_rdma_ut_transport_opts;
        rtransport.data_wr_pool = NULL;

        device.attr.device_cap_flags = 0;
        sgl->keyed.key = 0xEEEE;
        sgl->address = 0xFFFF;
        rdma_req.recv->buf = (void *)0xDDDD;

        /* Test 1: sgl type: keyed data block subtype: address */
        sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
        sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

        /* Part 1: simple I/O, one SGL smaller than the transport io unit size */
        MOCK_SET(spdk_iobuf_get, (void *)0x2000);
        reset_nvmf_rdma_request(&rdma_req);
        sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;

        device.map = (void *)0x0;
        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
        CU_ASSERT(rc == 0);
        CU_ASSERT(rdma_req.req.data_from_pool == true);
        CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
        CU_ASSERT((uint64_t)rdma_req.req.iovcnt == 1);
        CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
        CU_ASSERT(rdma_req.data.wr.num_sge == 1);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
        CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
        CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
        CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
        CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);

        /* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
        reset_nvmf_rdma_request(&rdma_req);
        sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

        CU_ASSERT(rc == 0);
        CU_ASSERT(rdma_req.req.data_from_pool == true);
        CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
        CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
        for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
                CU_ASSERT((uint64_t)rdma_req.req.iov[i].iov_base == 0x2000);
                CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
                CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
                CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
        }

        /* Part 3: simple I/O, one SGL larger than the transport max io size */
        reset_nvmf_rdma_request(&rdma_req);
        sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

        CU_ASSERT(rc == -1);

        /* Part 4: Pretend there are no buffer pools */
        MOCK_SET(spdk_iobuf_get, NULL);
        reset_nvmf_rdma_request(&rdma_req);
        sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

        CU_ASSERT(rc == 0);
        CU_ASSERT(rdma_req.req.data_from_pool == false);
        CU_ASSERT(rdma_req.req.iovcnt == 0);
        CU_ASSERT(rdma_req.data.wr.num_sge == 0);
        CU_ASSERT(rdma_req.req.iov[0].iov_base == NULL);
        CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
        CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
        CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);

        rdma_req.recv->buf = (void *)0xDDDD;
        /* Test 2: sgl type: data block subtype: offset (in capsule data) */
        sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
        sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;

        /* Part 1: Normal I/O smaller than in capsule data size, no offset */
        reset_nvmf_rdma_request(&rdma_req);
        sgl->address = 0;
        sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

        CU_ASSERT(rc == 0);
        CU_ASSERT(rdma_req.req.iovcnt == 1);
        CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)0xDDDD);
        CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
        CU_ASSERT(rdma_req.req.data_from_pool == false);

        /* Part 2: I/O offset + length too large */
        reset_nvmf_rdma_request(&rdma_req);
        sgl->address = rtransport.transport.opts.in_capsule_data_size;
        sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

        CU_ASSERT(rc == -1);

        /* Part 3: I/O too large */
        reset_nvmf_rdma_request(&rdma_req);
        sgl->address = 0;
        sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

        CU_ASSERT(rc == -1);

        /* Test 3: Multi SGL */
        sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
        sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
        sgl->address = 0;
        rdma_req.recv->buf = (void *)&sgl_desc;
        MOCK_SET(spdk_iobuf_get, &data);
        MOCK_SET(spdk_mempool_get, &data);

        /* part 1: 2 segments each with 1 wr. */
        reset_nvmf_rdma_request(&rdma_req);
        sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
        for (i = 0; i < 2; i++) {
                sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
                sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
                sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
                sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
                sgl_desc[i].keyed.key = 0x44;
        }

        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

        CU_ASSERT(rc == 0);
        CU_ASSERT(rdma_req.req.data_from_pool == true);
        CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
        CU_ASSERT(rdma_req.data.wr.num_sge == 1);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
        CU_ASSERT(rdma_req.data.wr.next == &data.wr);
        CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
        CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
        CU_ASSERT(data.wr.num_sge == 1);
        CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

        /* part 2: 2 segments, each with 1 wr containing 8 sge_elements */
        reset_nvmf_rdma_request(&rdma_req);
        sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
        for (i = 0; i < 2; i++) {
                sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
                sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
                sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
                sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
                sgl_desc[i].keyed.key = 0x44;
        }

        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

        CU_ASSERT(rc == 0);
        CU_ASSERT(rdma_req.req.data_from_pool == true);
        CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
        CU_ASSERT(rdma_req.req.iovcnt == 16);
        CU_ASSERT(rdma_req.data.wr.num_sge == 8);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
        CU_ASSERT(rdma_req.data.wr.next == &data.wr);
        CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
        CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
        CU_ASSERT(data.wr.num_sge == 8);
        CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

        /* part 3: 2 segments, one very large, one very small */
        reset_nvmf_rdma_request(&rdma_req);
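        /* The first descriptor spans 15.5 io units and should be split into 16
         * SGEs in the request's own WR (the last one half-sized); the second,
         * half-unit descriptor should land in the chained WR taken from the
         * (mocked) pool, as the asserts below verify. */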
        for (i = 0; i < 2; i++) {
                sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
                sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
                sgl_desc[i].keyed.key = 0x44;
        }

        sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
                                   rtransport.transport.opts.io_unit_size / 2;
        sgl_desc[0].address = 0x4000;
        sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
        sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
                              rtransport.transport.opts.io_unit_size / 2;

        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

        CU_ASSERT(rc == 0);
        CU_ASSERT(rdma_req.req.data_from_pool == true);
        CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
        CU_ASSERT(rdma_req.req.iovcnt == 16);
        CU_ASSERT(rdma_req.data.wr.num_sge == 16);
        for (i = 0; i < 15; i++) {
                CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
        }
        CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
        CU_ASSERT(rdma_req.data.wr.next == &data.wr);
        CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
        CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
                  rtransport.transport.opts.io_unit_size / 2);
        CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
        CU_ASSERT(data.wr.num_sge == 1);
        CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

        /* part 4: 2 SGL descriptors, each with a length of half the transport buffer
         * size; a single transport buffer should be allocated */
        reset_nvmf_rdma_request(&rdma_req);
        sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
        sgl_length = rtransport.transport.opts.io_unit_size / 2;
        for (i = 0; i < 2; i++) {
                sgl_desc[i].keyed.length = sgl_length;
                sgl_desc[i].address = 0x4000 + i * sgl_length;
        }

        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

        CU_ASSERT(rc == 0);
        CU_ASSERT(rdma_req.req.data_from_pool == true);
        CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size);
        CU_ASSERT(rdma_req.req.iovcnt == 1);

        CU_ASSERT(rdma_req.data.sgl[0].length == sgl_length);
        /* We mocked spdk_mempool_get to return the address of the data variable. The mempool
         * is used to get both additional WRs and data buffers, so data points to &data */
        CU_ASSERT(rdma_req.data.sgl[0].addr == (uint64_t)&data);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
        CU_ASSERT(rdma_req.data.wr.num_sge == 1);
        CU_ASSERT(rdma_req.data.wr.next == &data.wr);

        CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
        CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + sgl_length);
        CU_ASSERT(data.sgl[0].length == sgl_length);
        CU_ASSERT(data.sgl[0].addr == (uint64_t)&data + sgl_length);
        CU_ASSERT(data.wr.num_sge == 1);

        MOCK_CLEAR(spdk_mempool_get);
        MOCK_CLEAR(spdk_iobuf_get);

        reset_nvmf_rdma_request(&rdma_req);
}

static struct spdk_nvmf_rdma_recv *
create_recv(struct spdk_nvmf_rdma_qpair *rqpair, enum spdk_nvme_nvm_opcode opc)
{
        struct spdk_nvmf_rdma_recv *rdma_recv;
        union nvmf_h2c_msg *cmd;
        struct spdk_nvme_sgl_descriptor *sgl;

        rdma_recv = calloc(1, sizeof(*rdma_recv));
        rdma_recv->qpair = rqpair;
        cmd = calloc(1, sizeof(*cmd));
        rdma_recv->sgl[0].addr = (uintptr_t)cmd;
        cmd->nvme_cmd.opc = opc;
        sgl = &cmd->nvme_cmd.dptr.sgl1;
        sgl->keyed.key = 0xEEEE;
        sgl->address = 0xFFFF;
        sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
        sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
        sgl->keyed.length = 1;

        return rdma_recv;
}

static void
free_recv(struct spdk_nvmf_rdma_recv *rdma_recv)
{
        free((void *)rdma_recv->sgl[0].addr);
        free(rdma_recv);
}

static struct spdk_nvmf_rdma_request *
create_req(struct spdk_nvmf_rdma_qpair *rqpair,
           struct spdk_nvmf_rdma_recv *rdma_recv)
{
        struct spdk_nvmf_rdma_request *rdma_req;
        union nvmf_c2h_msg *cpl;

        rdma_req = calloc(1, sizeof(*rdma_req));
        rdma_req->recv = rdma_recv;
        rdma_req->req.qpair = &rqpair->qpair;
        rdma_req->state = RDMA_REQUEST_STATE_NEW;
        rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data_wr;
        rdma_req->data.wr.sg_list = rdma_req->data.sgl;
        cpl = calloc(1, sizeof(*cpl));
        rdma_req->rsp.sgl[0].addr = (uintptr_t)cpl;
        rdma_req->req.rsp = cpl;

        return rdma_req;
}

static void
free_req(struct spdk_nvmf_rdma_request *rdma_req)
{
        free((void *)rdma_req->rsp.sgl[0].addr);
        free(rdma_req);
}

static void
qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
            struct spdk_nvmf_rdma_poller *poller,
            struct spdk_nvmf_rdma_device *device,
            struct spdk_nvmf_rdma_resources *resources,
            struct spdk_nvmf_transport *transport)
{
        memset(rqpair, 0, sizeof(*rqpair));
        STAILQ_INIT(&rqpair->pending_rdma_write_queue);
        STAILQ_INIT(&rqpair->pending_rdma_read_queue);
        STAILQ_INIT(&rqpair->pending_rdma_send_queue);
        rqpair->poller = poller;
        rqpair->device = device;
        rqpair->resources = resources;
        rqpair->qpair.qid = 1;
        rqpair->ibv_state = IBV_QPS_RTS;
        rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
        rqpair->max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
        rqpair->max_send_depth = 16;
        rqpair->max_read_depth = 16;
        rqpair->qpair.transport = transport;
}

static void
poller_reset(struct spdk_nvmf_rdma_poller *poller,
             struct spdk_nvmf_rdma_poll_group *group)
{
        memset(poller, 0, sizeof(*poller));
        STAILQ_INIT(&poller->qpairs_pending_recv);
        STAILQ_INIT(&poller->qpairs_pending_send);
        poller->group = group;
}

static void
test_spdk_nvmf_rdma_request_process(void)
{
        struct spdk_nvmf_rdma_transport rtransport = {};
        struct spdk_nvmf_rdma_poll_group group = {};
        struct spdk_nvmf_rdma_poller poller = {};
        struct spdk_nvmf_rdma_device device = {};
        struct spdk_nvmf_rdma_resources resources = {};
        struct spdk_nvmf_rdma_qpair rqpair = {};
        struct spdk_nvmf_rdma_recv *rdma_recv;
        struct spdk_nvmf_rdma_request *rdma_req;
        struct spdk_iobuf_channel ch = {};
        bool progress;

        group.group.buf_cache = &ch;

        STAILQ_INIT(&group.group.pending_buf_queue);
        poller_reset(&poller, &group);
        qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

        rtransport.transport.opts = g_rdma_ut_transport_opts;
        rtransport.data_wr_pool = spdk_mempool_create("test_wr_pool", 128,
                                  sizeof(struct spdk_nvmf_rdma_request_data),
                                  0, 0);
        MOCK_CLEAR(spdk_iobuf_get);

        device.attr.device_cap_flags = 0;
        device.map = (void *)0x0;

        /* Test 1: single SGL READ request */
        rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_READ);
        rdma_req = create_req(&rqpair, rdma_recv);
        rqpair.current_recv_depth = 1;
        /* NEW -> EXECUTING */
        progress = nvmf_rdma_request_process(&rtransport, rdma_req);
        CU_ASSERT(progress == true);
        CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
        CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
        /* EXECUTED -> TRANSFERRING_C2H */
        rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
        progress = nvmf_rdma_request_process(&rtransport, rdma_req);
        CU_ASSERT(progress == true);
        CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
        CU_ASSERT(rdma_req->recv == NULL);
        /* COMPLETED -> FREE */
        rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
        progress = nvmf_rdma_request_process(&rtransport, rdma_req);
        CU_ASSERT(progress == true);
        CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

        free_recv(rdma_recv);
        free_req(rdma_req);
        poller_reset(&poller, &group);
        qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

        /* Test 2: single SGL WRITE request */
        rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
        rdma_req = create_req(&rqpair, rdma_recv);
        rqpair.current_recv_depth = 1;
        /* NEW -> TRANSFERRING_H2C */
        progress = nvmf_rdma_request_process(&rtransport, rdma_req);
        CU_ASSERT(progress == true);
        CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
        CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
        STAILQ_INIT(&poller.qpairs_pending_send);
        /* READY_TO_EXECUTE -> EXECUTING */
        rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
        progress = nvmf_rdma_request_process(&rtransport, rdma_req);
        CU_ASSERT(progress == true);
        CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
        /* EXECUTED -> COMPLETING */
        rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
        progress = nvmf_rdma_request_process(&rtransport, rdma_req);
        CU_ASSERT(progress == true);
        CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
        CU_ASSERT(rdma_req->recv == NULL);
        /* COMPLETED -> FREE */
        rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
        progress = nvmf_rdma_request_process(&rtransport, rdma_req);
        CU_ASSERT(progress == true);
        CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

        free_recv(rdma_recv);
        free_req(rdma_req);
        poller_reset(&poller, &group);
        qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

        /* Test 3: WRITE+WRITE ibv_send batching */
        {
                struct spdk_nvmf_rdma_recv *recv1, *recv2;
                struct spdk_nvmf_rdma_request *req1, *req2;
                recv1 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
                req1 = create_req(&rqpair, recv1);
                recv2 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
                req2 = create_req(&rqpair, recv2);

                /* WRITE 1: NEW -> TRANSFERRING_H2C */
                rqpair.current_recv_depth = 1;
                nvmf_rdma_request_process(&rtransport, req1);
                CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);

                /* WRITE 2: NEW -> TRANSFERRING_H2C */
                rqpair.current_recv_depth = 2;
                nvmf_rdma_request_process(&rtransport, req2);
                CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);

                STAILQ_INIT(&poller.qpairs_pending_send);

                /* WRITE 1 completes before WRITE 2 has finished RDMA reading */
                /* WRITE 1: READY_TO_EXECUTE -> EXECUTING */
                req1->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
                nvmf_rdma_request_process(&rtransport, req1);
                CU_ASSERT(req1->state == RDMA_REQUEST_STATE_EXECUTING);
                /* WRITE 1: EXECUTED -> COMPLETING */
                req1->state = RDMA_REQUEST_STATE_EXECUTED;
                nvmf_rdma_request_process(&rtransport, req1);
                CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
                STAILQ_INIT(&poller.qpairs_pending_send);
                /* WRITE 1: COMPLETED -> FREE */
                req1->state = RDMA_REQUEST_STATE_COMPLETED;
                nvmf_rdma_request_process(&rtransport, req1);
                CU_ASSERT(req1->state == RDMA_REQUEST_STATE_FREE);

                /* Now WRITE 2 has finished reading and completes */
                /* WRITE 2: READY_TO_EXECUTE -> EXECUTING */
                req2->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
                nvmf_rdma_request_process(&rtransport, req2);
                CU_ASSERT(req2->state == RDMA_REQUEST_STATE_EXECUTING);
                /* WRITE 2: EXECUTED -> COMPLETING */
                req2->state = RDMA_REQUEST_STATE_EXECUTED;
                nvmf_rdma_request_process(&rtransport, req2);
                CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
                STAILQ_INIT(&poller.qpairs_pending_send);
                /* WRITE 2: COMPLETED -> FREE */
                req2->state = RDMA_REQUEST_STATE_COMPLETED;
                nvmf_rdma_request_process(&rtransport, req2);
                CU_ASSERT(req2->state == RDMA_REQUEST_STATE_FREE);

                free_recv(recv1);
                free_req(req1);
                free_recv(recv2);
                free_req(req2);
                poller_reset(&poller, &group);
                qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
        }

        /* Test 4: invalid command, check xfer type */
        {
                struct spdk_nvmf_rdma_recv *rdma_recv_inv;
                struct spdk_nvmf_rdma_request *rdma_req_inv;
                /* construct an opcode that specifies BIDIRECTIONAL transfer */
                uint8_t opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;

                rdma_recv_inv = create_recv(&rqpair, opc);
                rdma_req_inv = create_req(&rqpair, rdma_recv_inv);

                /* NEW -> RDMA_REQUEST_STATE_COMPLETING */
                rqpair.current_recv_depth = 1;
                progress = nvmf_rdma_request_process(&rtransport, rdma_req_inv);
                CU_ASSERT(progress == true);
                CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_COMPLETING);
                CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
                CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);

                /* RDMA_REQUEST_STATE_COMPLETED -> FREE */
                rdma_req_inv->state = RDMA_REQUEST_STATE_COMPLETED;
                nvmf_rdma_request_process(&rtransport, rdma_req_inv);
                CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_FREE);

                free_recv(rdma_recv_inv);
                free_req(rdma_req_inv);
                poller_reset(&poller, &group);
                qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
        }

        /* Test 5: Write response waits in queue */
        {
                rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
                rdma_req = create_req(&rqpair, rdma_recv);
                rqpair.current_recv_depth = 1;
                /* NEW -> TRANSFERRING_H2C */
                progress = nvmf_rdma_request_process(&rtransport, rdma_req);
                CU_ASSERT(progress == true);
                CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
                CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
                STAILQ_INIT(&poller.qpairs_pending_send);
                /* READY_TO_EXECUTE -> EXECUTING */
                rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
                progress = nvmf_rdma_request_process(&rtransport, rdma_req);
                CU_ASSERT(progress == true);
                CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
                /* EXECUTED -> COMPLETING */
                rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
                /* Send queue is full */
                rqpair.current_send_depth = rqpair.max_send_depth;
                progress = nvmf_rdma_request_process(&rtransport, rdma_req);
                CU_ASSERT(progress == true);
                CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_READY_TO_COMPLETE_PENDING);
                CU_ASSERT(rdma_req == STAILQ_FIRST(&rqpair.pending_rdma_send_queue));

                /* Send queue is still full */
                progress = nvmf_rdma_request_process(&rtransport, rdma_req);
                CU_ASSERT(progress == false);
                CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_READY_TO_COMPLETE_PENDING);
                CU_ASSERT(rdma_req == STAILQ_FIRST(&rqpair.pending_rdma_send_queue));

                /* Slot is available */
                rqpair.current_send_depth = rqpair.max_send_depth - 1;
                progress = nvmf_rdma_request_process(&rtransport, rdma_req);
                CU_ASSERT(progress == true);
                CU_ASSERT(STAILQ_EMPTY(&rqpair.pending_rdma_send_queue));
                CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
                CU_ASSERT(rdma_req->recv == NULL);
                /* COMPLETED -> FREE */
                rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
                progress = nvmf_rdma_request_process(&rtransport, rdma_req);
                CU_ASSERT(progress == true);
                CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

                free_recv(rdma_recv);
                free_req(rdma_req);
                poller_reset(&poller, &group);
                qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
        }

        spdk_mempool_free(rtransport.data_wr_pool);
}

#define TEST_GROUPS_COUNT 5
static void
test_nvmf_rdma_get_optimal_poll_group(void)
{
        struct spdk_nvmf_rdma_transport rtransport = {};
        struct spdk_nvmf_transport *transport = &rtransport.transport;
        struct spdk_nvmf_rdma_qpair rqpair = {};
        struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
        struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
        struct spdk_nvmf_transport_poll_group *result;
        struct spdk_nvmf_poll_group group = {};
        uint32_t i;

        rqpair.qpair.transport = transport;
        TAILQ_INIT(&rtransport.poll_groups);

        for (i = 0; i < TEST_GROUPS_COUNT; i++) {
                groups[i] = nvmf_rdma_poll_group_create(transport, NULL);
                CU_ASSERT(groups[i] != NULL);
                groups[i]->group = &group;
                rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
                groups[i]->transport = transport;
        }
        CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[0]);
        CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[0]);

        /* Emulate connection of %TEST_GROUPS_COUNT% initiators - each creates 1 admin and 1 io qp */
        for (i = 0; i < TEST_GROUPS_COUNT; i++) {
                rqpair.qpair.qid = 0;
                result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
                CU_ASSERT(result == groups[i]);
                CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
                CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i]);

                rqpair.qpair.qid = 1;
                result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
                CU_ASSERT(result == groups[i]);
                CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
                CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
        }
        /* Wrapped around; the admin/io pg pointers point back to the first pg.
         * Destroy all poll groups except the last one */
        for (i = 0; i < TEST_GROUPS_COUNT - 1; i++) {
                nvmf_rdma_poll_group_destroy(groups[i]);
                CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[i + 1]);
                CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i + 1]);
        }

        CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
        CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

        /* Check that pointers to the next admin/io poll groups are not changed */
        rqpair.qpair.qid = 0;
        result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
        CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
        CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
        CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

        rqpair.qpair.qid = 1;
        result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
        CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
        CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
        CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

        /* Remove the last poll group, check that pointers are NULL */
        nvmf_rdma_poll_group_destroy(groups[TEST_GROUPS_COUNT - 1]);
        CU_ASSERT(rtransport.conn_sched.next_admin_pg == NULL);
        CU_ASSERT(rtransport.conn_sched.next_io_pg == NULL);

        /* Request optimal poll group, result must be NULL */
        rqpair.qpair.qid = 0;
        result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
        CU_ASSERT(result == NULL);

        rqpair.qpair.qid = 1;
        result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
        CU_ASSERT(result == NULL);
}
#undef TEST_GROUPS_COUNT

static void
test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
{
        struct spdk_nvmf_rdma_transport rtransport;
        struct spdk_nvmf_rdma_device device;
        struct spdk_nvmf_rdma_request rdma_req = {};
        struct spdk_nvmf_rdma_recv recv;
        struct spdk_nvmf_rdma_poll_group group;
        struct spdk_nvmf_rdma_qpair rqpair;
        struct spdk_nvmf_rdma_poller poller;
        union nvmf_c2h_msg cpl;
        union nvmf_h2c_msg cmd;
        struct spdk_nvme_sgl_descriptor *sgl;
        struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
        char data_buffer[8192];
        struct spdk_nvmf_rdma_request_data *data = (struct spdk_nvmf_rdma_request_data *)data_buffer;
        char data2_buffer[8192];
        struct spdk_nvmf_rdma_request_data *data2 = (struct spdk_nvmf_rdma_request_data *)data2_buffer;
        const uint32_t data_bs = 512;
        const uint32_t md_size = 8;
        int rc, i;
        struct spdk_dif_ctx_init_ext_opts dif_opts;

        MOCK_CLEAR(spdk_mempool_get);
        MOCK_CLEAR(spdk_iobuf_get);

        data->wr.sg_list = data->sgl;
        group.group.transport = &rtransport.transport;
        poller.group = &group;
        rqpair.poller = &poller;
        rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

        sgl = &cmd.nvme_cmd.dptr.sgl1;
        rdma_req.recv = &recv;
        rdma_req.req.cmd = &cmd;
        rdma_req.req.rsp = &cpl;
        rdma_req.data.wr.sg_list = rdma_req.data.sgl;
        rdma_req.req.qpair = &rqpair.qpair;
        rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

        rtransport.transport.opts = g_rdma_ut_transport_opts;
        rtransport.data_wr_pool = NULL;

        device.attr.device_cap_flags = 0;
        device.map = NULL;
        sgl->keyed.key = 0xEEEE;
        sgl->address = 0xFFFF;
        rdma_req.recv->buf = (void *)0xDDDD;

        /* Test 1: sgl type: keyed data block subtype: address */
        sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
        sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

        /* Part 1: simple I/O, one SGL smaller than the transport io unit size, block size 512 */
        MOCK_SET(spdk_iobuf_get, (void *)0x2000);
        reset_nvmf_rdma_request(&rdma_req);
        dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
        dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
        spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
                          SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
                          0, 0, 0, 0, 0, &dif_opts);
        rdma_req.req.dif_enabled = true;
        rtransport.transport.opts.io_unit_size = data_bs * 8;
        rdma_req.req.qpair->transport = &rtransport.transport;
        sgl->keyed.length = data_bs * 4;

        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

        CU_ASSERT(rc == 0);
        CU_ASSERT(rdma_req.req.data_from_pool == true);
        CU_ASSERT(rdma_req.req.length == data_bs * 4);
        CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
        CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
        CU_ASSERT(rdma_req.req.iovcnt == 1);
        CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
        CU_ASSERT(rdma_req.data.wr.num_sge == 1);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
        CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);

        CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
        CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rdma_req.req.length);
        CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);

        /* Part 2: simple I/O, one SGL equal to io unit size, io_unit_size is not aligned with md_size,
           block size 512 */
        MOCK_SET(spdk_iobuf_get, (void *)0x2000);
        reset_nvmf_rdma_request(&rdma_req);
        spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
                          SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
                          0, 0, 0, 0, 0, &dif_opts);
        rdma_req.req.dif_enabled = true;
        rtransport.transport.opts.io_unit_size = data_bs * 4;
        sgl->keyed.length = data_bs * 4;

        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

        CU_ASSERT(rc == 0);
        CU_ASSERT(rdma_req.req.data_from_pool == true);
        CU_ASSERT(rdma_req.req.length == data_bs * 4);
        CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
        CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
        CU_ASSERT(rdma_req.req.iovcnt == 2);
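        /* io_unit_size (2048) is not a multiple of the extended block size (512 + 8 = 520):
         * the first buffer holds 3 full extended blocks plus 488 bytes of the 4th block's
         * data; the remaining 24 data bytes spill into a 2nd buffer, giving 5 SGEs. */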
        CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
        CU_ASSERT(rdma_req.data.wr.num_sge == 5);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
        CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);

        for (i = 0; i < 3; ++i) {
                CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
                CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
                CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
        }
        CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
        CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
        CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == RDMA_UT_LKEY);

        /* 2nd buffer consumed */
        CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
        CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
        CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == RDMA_UT_LKEY);

        /* Part 3: simple I/O, one SGL equal to io unit size, io_unit_size is equal to block size 512 bytes */
        MOCK_SET(spdk_iobuf_get, (void *)0x2000);
        reset_nvmf_rdma_request(&rdma_req);
        spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
                          SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
                          0, 0, 0, 0, 0, &dif_opts);
        rdma_req.req.dif_enabled = true;
        rtransport.transport.opts.io_unit_size = data_bs;
        sgl->keyed.length = data_bs;

        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

        CU_ASSERT(rc == 0);
        CU_ASSERT(rdma_req.req.data_from_pool == true);
        CU_ASSERT(rdma_req.req.length == data_bs);
        CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
        CU_ASSERT(rdma_req.req.dif.elba_length == data_bs + md_size);
        CU_ASSERT(rdma_req.req.iovcnt == 2);
        CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
        CU_ASSERT(rdma_req.data.wr.num_sge == 1);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
        CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);

        CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
        CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs);
        CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);

        CU_ASSERT(rdma_req.req.iovcnt == 2);
        CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)((unsigned long)0x2000));
        CU_ASSERT(rdma_req.req.iov[0].iov_len == data_bs);
        /* 2nd buffer consumed for metadata */
        CU_ASSERT(rdma_req.req.iov[1].iov_base == (void *)((unsigned long)0x2000));
        CU_ASSERT(rdma_req.req.iov[1].iov_len == md_size);

        /* Part 4: simple I/O, one SGL equal to io unit size, io_unit_size is aligned with md_size,
           block size 512 */
        MOCK_SET(spdk_iobuf_get, (void *)0x2000);
        reset_nvmf_rdma_request(&rdma_req);
        spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
                          SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
                          0, 0, 0, 0, 0, &dif_opts);
        rdma_req.req.dif_enabled = true;
        rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
        sgl->keyed.length = data_bs * 4;

        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

        CU_ASSERT(rc == 0);
        CU_ASSERT(rdma_req.req.data_from_pool == true);
        CU_ASSERT(rdma_req.req.length == data_bs * 4);
        CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
        CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
        CU_ASSERT(rdma_req.req.iovcnt == 1);
        CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
        CU_ASSERT(rdma_req.data.wr.num_sge == 1);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
        CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);

        CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
        CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rdma_req.req.length);
        CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);

        /* Part 5: simple I/O, one SGL equal to 2x io unit size, io_unit_size is aligned with md_size,
           block size 512 */
        MOCK_SET(spdk_iobuf_get, (void *)0x2000);
        reset_nvmf_rdma_request(&rdma_req);
        spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
                          SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
                          0, 0, 0, 0, 0, &dif_opts);
        rdma_req.req.dif_enabled = true;
        rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 2;
        sgl->keyed.length = data_bs * 4;

        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

        CU_ASSERT(rc == 0);
        CU_ASSERT(rdma_req.req.data_from_pool == true);
        CU_ASSERT(rdma_req.req.length == data_bs * 4);
        CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
        CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
        CU_ASSERT(rdma_req.req.iovcnt == 2);
        CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
        CU_ASSERT(rdma_req.data.wr.num_sge == 2);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
        CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);

        for (i = 0; i < 2; ++i) {
                CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
                CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs * 2);
        }

        /* Part 6: simple I/O, one SGL larger than the transport io unit size, io_unit_size is not aligned to md_size,
           block size 512 */
        MOCK_SET(spdk_iobuf_get, (void *)0x2000);
        reset_nvmf_rdma_request(&rdma_req);
        spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
                          SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
                          0, 0, 0, 0, 0, &dif_opts);
        rdma_req.req.dif_enabled = true;
        rtransport.transport.opts.io_unit_size = data_bs * 4;
        sgl->keyed.length = data_bs * 6;

        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

        CU_ASSERT(rc == 0);
        CU_ASSERT(rdma_req.req.data_from_pool == true);
        CU_ASSERT(rdma_req.req.length == data_bs * 6);
        CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
        CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 6);
        CU_ASSERT(rdma_req.req.iovcnt == 2);
        CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);
        CU_ASSERT(rdma_req.data.wr.num_sge == 7);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
        CU_ASSERT((uint64_t)rdma_req.req.iov[0].iov_base == 0x2000);

        for (i = 0; i < 3; ++i) {
                CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
                CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
                CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
        }
        CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
        CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
        CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == RDMA_UT_LKEY);

        /* 2nd IO buffer consumed */
        CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
        CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
        CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == RDMA_UT_LKEY);

        CU_ASSERT(rdma_req.data.wr.sg_list[5].addr == 0x2000 + 24 + md_size);
        CU_ASSERT(rdma_req.data.wr.sg_list[5].length == 512);
        CU_ASSERT(rdma_req.data.wr.sg_list[5].lkey == RDMA_UT_LKEY);

        CU_ASSERT(rdma_req.data.wr.sg_list[6].addr == 0x2000 + 24 + 512 + md_size * 2);
        CU_ASSERT(rdma_req.data.wr.sg_list[6].length == 512);
        CU_ASSERT(rdma_req.data.wr.sg_list[6].lkey == RDMA_UT_LKEY);

        /* Part 7: simple I/O, number of SGL entries exceeds the number of entries
           one WR can hold. An additional WR is chained */
        MOCK_SET(spdk_iobuf_get, data2_buffer);
        MOCK_SET(spdk_mempool_get, data2_buffer);
        reset_nvmf_rdma_request(&rdma_req);
        spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
                          SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
                          0, 0, 0, 0, 0, &dif_opts);
        rdma_req.req.dif_enabled = true;
        rtransport.transport.opts.io_unit_size = data_bs * 16;
        sgl->keyed.length = data_bs * 16;

        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

        CU_ASSERT(rc == 0);
        CU_ASSERT(rdma_req.req.data_from_pool == true);
        CU_ASSERT(rdma_req.req.length == data_bs * 16);
        CU_ASSERT(rdma_req.req.iovcnt == 2);
        CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
        CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 16);
        CU_ASSERT(rdma_req.req.iov[0].iov_base == data2_buffer);
        CU_ASSERT(rdma_req.data.wr.num_sge == 16);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);

        for (i = 0; i < 15; ++i) {
                CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)data2_buffer + i * (data_bs + md_size));
                CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
                CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
        }

        /* 8192 - (512 + 8) * 15 = 392 */
        CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)data2_buffer + i * (data_bs + md_size));
        CU_ASSERT(rdma_req.data.wr.sg_list[i].length == 392);
        CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);

        /* additional wr from pool */
        CU_ASSERT(rdma_req.data.wr.next == (void *)&data2->wr);
        CU_ASSERT(rdma_req.data.wr.next->num_sge == 1);
        CU_ASSERT(rdma_req.data.wr.next->next == &rdma_req.rsp.wr);
        /* 2nd IO buffer */
        CU_ASSERT(data2->wr.sg_list[0].addr == (uintptr_t)data2_buffer);
        CU_ASSERT(data2->wr.sg_list[0].length == 120);
        CU_ASSERT(data2->wr.sg_list[0].lkey == RDMA_UT_LKEY);

        /* Part 8: simple I/O, data with metadata do not fit into a single io buffer */
        MOCK_SET(spdk_iobuf_get, (void *)0x2000);
        reset_nvmf_rdma_request(&rdma_req);
        spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
                          SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
                          0, 0, 0, 0, 0, &dif_opts);
        rdma_req.req.dif_enabled = true;
        rtransport.transport.opts.io_unit_size = 516;
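        /* io_unit_size is 516 = 512 + 4: each buffer fits one data block plus only
         * half of its 8-byte metadata, so the 2nd block's data starts 4 bytes into
         * the next buffer, after the remainder of the 1st block's metadata. */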
        sgl->keyed.length = data_bs * 2;

        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

        CU_ASSERT(rc == 0);
        CU_ASSERT(rdma_req.req.data_from_pool == true);
        CU_ASSERT(rdma_req.req.length == data_bs * 2);
        CU_ASSERT(rdma_req.req.iovcnt == 3);
        CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
        CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 2);
        CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)0x2000);
        CU_ASSERT(rdma_req.data.wr.num_sge == 2);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);

        CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
        CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 512);
        CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);

        /* 2nd IO buffer consumed, offset by 4 bytes because part of the metadata
           is located at the beginning of that buffer */
        CU_ASSERT(rdma_req.data.wr.sg_list[1].addr == 0x2000 + 4);
        CU_ASSERT(rdma_req.data.wr.sg_list[1].length == 512);
        CU_ASSERT(rdma_req.data.wr.sg_list[1].lkey == RDMA_UT_LKEY);

        /* Test 2: Multi SGL */
        sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
        sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
        sgl->address = 0;
        rdma_req.recv->buf = (void *)&sgl_desc;
        MOCK_SET(spdk_mempool_get, data_buffer);
        MOCK_SET(spdk_iobuf_get, data_buffer);

        /* part 1: 2 segments each with 1 wr. io_unit_size is aligned with data_bs + md_size */
        reset_nvmf_rdma_request(&rdma_req);
        spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
                          SPDK_DIF_TYPE1,
                          SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
                          0, 0, 0, 0, 0, &dif_opts);
        rdma_req.req.dif_enabled = true;
        rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
        sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);

        for (i = 0; i < 2; i++) {
                sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
                sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
                sgl_desc[i].keyed.length = data_bs * 4;
                sgl_desc[i].address = 0x4000 + i * data_bs * 4;
                sgl_desc[i].keyed.key = 0x44;
        }

        rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

        CU_ASSERT(rc == 0);
        CU_ASSERT(rdma_req.req.data_from_pool == true);
        CU_ASSERT(rdma_req.req.length == data_bs * 4 * 2);
        CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
        CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4 * 2);
        CU_ASSERT(rdma_req.data.wr.num_sge == 1);
        CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == (uintptr_t)(data_buffer));
        CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs * 4);

        CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
        CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
        CU_ASSERT(rdma_req.data.wr.next == &data->wr);
        CU_ASSERT(data->wr.wr.rdma.rkey == 0x44);
        CU_ASSERT(data->wr.wr.rdma.remote_addr == 0x4000 + data_bs * 4);
        CU_ASSERT(data->wr.num_sge == 1);
        CU_ASSERT(data->wr.sg_list[0].addr == (uintptr_t)(data_buffer));
        CU_ASSERT(data->wr.sg_list[0].length == data_bs * 4);

        CU_ASSERT(data->wr.next == &rdma_req.rsp.wr);
        reset_nvmf_rdma_request(&rdma_req);
}

static void
test_nvmf_rdma_opts_init(void)
{
        struct spdk_nvmf_transport_opts opts = {};

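        /* nvmf_rdma_opts_init() should fill the opts structure with the RDMA
         * transport defaults; each field is verified against its macro below. */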
        nvmf_rdma_opts_init(&opts);
        CU_ASSERT(opts.max_queue_depth == SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH);
        CU_ASSERT(opts.max_qpairs_per_ctrlr == SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR);
        CU_ASSERT(opts.in_capsule_data_size == SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE);
        CU_ASSERT(opts.max_io_size == SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE);
        CU_ASSERT(opts.io_unit_size == SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE);
        CU_ASSERT(opts.max_aq_depth == SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH);
        CU_ASSERT(opts.num_shared_buffers == SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS);
        CU_ASSERT(opts.buf_cache_size == SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE);
        CU_ASSERT(opts.dif_insert_or_strip == SPDK_NVMF_RDMA_DIF_INSERT_OR_STRIP);
        CU_ASSERT(opts.abort_timeout_sec == SPDK_NVMF_RDMA_DEFAULT_ABORT_TIMEOUT_SEC);
        CU_ASSERT(opts.transport_specific == NULL);
}

static void
test_nvmf_rdma_request_free_data(void)
{
        struct spdk_nvmf_rdma_request rdma_req = {};
        struct spdk_nvmf_rdma_transport rtransport = {};
        struct spdk_nvmf_rdma_request_data *next_request_data = NULL;

        MOCK_CLEAR(spdk_mempool_get);
        rtransport.data_wr_pool = spdk_mempool_create("spdk_nvmf_rdma_wr_data",
                                  SPDK_NVMF_MAX_SGL_ENTRIES,
                                  sizeof(struct spdk_nvmf_rdma_request_data),
                                  SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
                                  SPDK_ENV_SOCKET_ID_ANY);
        next_request_data = spdk_mempool_get(rtransport.data_wr_pool);
        SPDK_CU_ASSERT_FATAL(((struct test_mempool *)rtransport.data_wr_pool)->count ==
                             SPDK_NVMF_MAX_SGL_ENTRIES - 1);
        next_request_data->wr.wr_id = (uint64_t)&rdma_req.data_wr;
        next_request_data->wr.num_sge = 2;
        next_request_data->wr.next = NULL;
        rdma_req.data.wr.next = &next_request_data->wr;
        rdma_req.data.wr.wr_id = (uint64_t)&rdma_req.data_wr;
        rdma_req.data.wr.num_sge = 2;
        rdma_req.transfer_wr = &rdma_req.data.wr;

        nvmf_rdma_request_free_data(&rdma_req, &rtransport);
        /* Check that next_request_data was returned to the memory pool */
        CU_ASSERT(((struct test_mempool *)rtransport.data_wr_pool)->count == SPDK_NVMF_MAX_SGL_ENTRIES);
        CU_ASSERT(rdma_req.data.wr.num_sge == 0);

        spdk_mempool_free(rtransport.data_wr_pool);
}

static void
test_nvmf_rdma_update_ibv_state(void)
{
        struct spdk_nvmf_rdma_qpair rqpair = {};
        struct spdk_rdma_qp rdma_qp = {};
        struct ibv_qp qp = {};
        int rc = 0;

        rqpair.rdma_qp = &rdma_qp;

        /* Case 1: Failed to get the updated RDMA queue pair state */
        rqpair.ibv_state = IBV_QPS_INIT;
        rqpair.rdma_qp->qp = NULL;

        rc = nvmf_rdma_update_ibv_state(&rqpair);
        CU_ASSERT(rc == IBV_QPS_ERR + 1);

        /* Case 2: Bad state updated */
        rqpair.rdma_qp->qp = &qp;
        qp.state = IBV_QPS_ERR;
        rc = nvmf_rdma_update_ibv_state(&rqpair);
        CU_ASSERT(rqpair.ibv_state == 10);
        CU_ASSERT(rc == IBV_QPS_ERR + 1);

        /* Case 3: Pass */
        qp.state = IBV_QPS_INIT;
        rc = nvmf_rdma_update_ibv_state(&rqpair);
        CU_ASSERT(rqpair.ibv_state == IBV_QPS_INIT);
        CU_ASSERT(rc == IBV_QPS_INIT);
}

static void
test_nvmf_rdma_resources_create(void)
{
        static struct spdk_nvmf_rdma_resources *rdma_resource;
        struct spdk_nvmf_rdma_resource_opts opts = {};
        struct spdk_nvmf_rdma_qpair qpair = {};
        struct spdk_nvmf_rdma_recv *recv = NULL;
        struct spdk_nvmf_rdma_request *req = NULL;
        const int DEPTH = 128;

        opts.max_queue_depth = DEPTH;
        opts.in_capsule_data_size = 4096;
        opts.shared = true;
        opts.qpair = &qpair;

        rdma_resource = nvmf_rdma_resources_create(&opts);
        CU_ASSERT(rdma_resource != NULL);
        /* Just check the first and last entries */
        recv = &rdma_resource->recvs[0];
        req = &rdma_resource->reqs[0];
        CU_ASSERT(recv->rdma_wr.type == RDMA_WR_TYPE_RECV);
        CU_ASSERT((uintptr_t)recv->buf == (uintptr_t)(rdma_resource->bufs));
        CU_ASSERT(recv->sgl[0].addr == (uintptr_t)&rdma_resource->cmds[0]);
        CU_ASSERT(recv->sgl[0].length == sizeof(rdma_resource->cmds[0]));
        CU_ASSERT(recv->sgl[0].lkey == RDMA_UT_LKEY);
        CU_ASSERT(recv->wr.num_sge == 2);
        CU_ASSERT(recv->wr.wr_id == (uintptr_t)&rdma_resource->recvs[0].rdma_wr);
        CU_ASSERT(recv->wr.sg_list == rdma_resource->recvs[0].sgl);
        CU_ASSERT(req->req.rsp == &rdma_resource->cpls[0]);
        CU_ASSERT(req->rsp.sgl[0].addr == (uintptr_t)&rdma_resource->cpls[0]);
        CU_ASSERT(req->rsp.sgl[0].length == sizeof(rdma_resource->cpls[0]));
        CU_ASSERT(req->rsp.sgl[0].lkey == RDMA_UT_LKEY);
        CU_ASSERT(req->rsp_wr.type == RDMA_WR_TYPE_SEND);
        CU_ASSERT(req->rsp.wr.wr_id == (uintptr_t)&rdma_resource->reqs[0].rsp_wr);
        CU_ASSERT(req->rsp.wr.next == NULL);
        CU_ASSERT(req->rsp.wr.opcode == IBV_WR_SEND);
        CU_ASSERT(req->rsp.wr.send_flags == IBV_SEND_SIGNALED);
        CU_ASSERT(req->rsp.wr.sg_list == rdma_resource->reqs[0].rsp.sgl);
        CU_ASSERT(req->rsp.wr.num_sge == NVMF_DEFAULT_RSP_SGE);
        CU_ASSERT(req->data_wr.type == RDMA_WR_TYPE_DATA);
        CU_ASSERT(req->data.wr.wr_id == (uintptr_t)&rdma_resource->reqs[0].data_wr);
        CU_ASSERT(req->data.wr.next == NULL);
        CU_ASSERT(req->data.wr.send_flags == IBV_SEND_SIGNALED);
        CU_ASSERT(req->data.wr.sg_list == rdma_resource->reqs[0].data.sgl);
        CU_ASSERT(req->data.wr.num_sge == SPDK_NVMF_MAX_SGL_ENTRIES);
        CU_ASSERT(req->state == RDMA_REQUEST_STATE_FREE);

        recv = &rdma_resource->recvs[DEPTH - 1];
        req = &rdma_resource->reqs[DEPTH - 1];
        CU_ASSERT(recv->rdma_wr.type == RDMA_WR_TYPE_RECV);
        CU_ASSERT((uintptr_t)recv->buf == (uintptr_t)(rdma_resource->bufs +
                        (DEPTH - 1) * 4096));
        CU_ASSERT(recv->sgl[0].addr == (uintptr_t)&rdma_resource->cmds[DEPTH - 1]);
        CU_ASSERT(recv->sgl[0].length == sizeof(rdma_resource->cmds[DEPTH - 1]));
        CU_ASSERT(recv->sgl[0].lkey == RDMA_UT_LKEY);
        CU_ASSERT(recv->wr.num_sge == 2);
        CU_ASSERT(recv->wr.wr_id == (uintptr_t)&rdma_resource->recvs[DEPTH - 1].rdma_wr);
        CU_ASSERT(recv->wr.sg_list == rdma_resource->recvs[DEPTH - 1].sgl);
        CU_ASSERT(req->req.rsp == &rdma_resource->cpls[DEPTH - 1]);
        CU_ASSERT(req->rsp.sgl[0].addr == (uintptr_t)&rdma_resource->cpls[DEPTH - 1]);
        CU_ASSERT(req->rsp.sgl[0].length == sizeof(rdma_resource->cpls[DEPTH - 1]));
        CU_ASSERT(req->rsp.sgl[0].lkey == RDMA_UT_LKEY);
        CU_ASSERT(req->rsp_wr.type == RDMA_WR_TYPE_SEND);
        CU_ASSERT(req->rsp.wr.wr_id == (uintptr_t)&req->rsp_wr);
        CU_ASSERT(req->rsp.wr.next == NULL);
        CU_ASSERT(req->rsp.wr.opcode == IBV_WR_SEND);
        CU_ASSERT(req->rsp.wr.send_flags == IBV_SEND_SIGNALED);
        CU_ASSERT(req->rsp.wr.sg_list == rdma_resource->reqs[DEPTH - 1].rsp.sgl);
        CU_ASSERT(req->rsp.wr.num_sge == NVMF_DEFAULT_RSP_SGE);
        CU_ASSERT(req->data_wr.type == RDMA_WR_TYPE_DATA);
        CU_ASSERT(req->data.wr.wr_id == (uintptr_t)&req->data_wr);
        CU_ASSERT(req->data.wr.next == NULL);
        CU_ASSERT(req->data.wr.send_flags == IBV_SEND_SIGNALED);
        CU_ASSERT(req->data.wr.sg_list == rdma_resource->reqs[DEPTH - 1].data.sgl);
        CU_ASSERT(req->data.wr.num_sge == SPDK_NVMF_MAX_SGL_ENTRIES);
        CU_ASSERT(req->state == RDMA_REQUEST_STATE_FREE);

        nvmf_rdma_resources_destroy(rdma_resource);
}

static void
test_nvmf_rdma_qpair_compare(void)
{
        struct spdk_nvmf_rdma_qpair rqpair1 = {}, rqpair2 = {};

        rqpair1.qp_num = 0;
        rqpair2.qp_num = UINT32_MAX;

        CU_ASSERT(nvmf_rdma_qpair_compare(&rqpair1, &rqpair2) < 0);
        CU_ASSERT(nvmf_rdma_qpair_compare(&rqpair2, &rqpair1) > 0);
}

static void
test_nvmf_rdma_resize_cq(void)
{
        int rc = -1;
        int tnum_wr = 0;
        int tnum_cqe = 0;
        struct spdk_nvmf_rdma_qpair rqpair = {};
        struct spdk_nvmf_rdma_poller rpoller = {};
        struct spdk_nvmf_rdma_device rdevice = {};
        struct ibv_context ircontext = {};
        struct ibv_device idevice = {};

        rdevice.context = &ircontext;
        rqpair.poller = &rpoller;
        ircontext.device = &idevice;

        /* Test1: The current capacity supports the required size. */
        rpoller.required_num_wr = 10;
        rpoller.num_cqe = 20;
        rqpair.max_queue_depth = 2;
        tnum_wr = rpoller.required_num_wr;
        tnum_cqe = rpoller.num_cqe;

        rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
        CU_ASSERT(rc == 0);
        CU_ASSERT(rpoller.required_num_wr == 10 + MAX_WR_PER_QP(rqpair.max_queue_depth));
        CU_ASSERT(rpoller.required_num_wr > tnum_wr);
        CU_ASSERT(rpoller.num_cqe == tnum_cqe);

        /* Test2: iWARP doesn't support CQ resize. */
        tnum_wr = rpoller.required_num_wr;
        tnum_cqe = rpoller.num_cqe;
        idevice.transport_type = IBV_TRANSPORT_IWARP;

        rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
        CU_ASSERT(rc == -1);
        CU_ASSERT(rpoller.required_num_wr == tnum_wr);
        CU_ASSERT(rpoller.num_cqe == tnum_cqe);

        /* Test3: RDMA CQE requirement exceeds the device max_cqe limitation. */
        tnum_wr = rpoller.required_num_wr;
        tnum_cqe = rpoller.num_cqe;
        idevice.transport_type = IBV_TRANSPORT_UNKNOWN;
        rdevice.attr.max_cqe = 3;

        rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
        CU_ASSERT(rc == -1);
        CU_ASSERT(rpoller.required_num_wr == tnum_wr);
        CU_ASSERT(rpoller.num_cqe == tnum_cqe);

        /* Test4: RDMA CQ resize failed. */
        tnum_wr = rpoller.required_num_wr;
        tnum_cqe = rpoller.num_cqe;
        idevice.transport_type = IBV_TRANSPORT_IB;
        rdevice.attr.max_cqe = 30;
        MOCK_SET(ibv_resize_cq, -1);

        rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
        CU_ASSERT(rc == -1);
        CU_ASSERT(rpoller.required_num_wr == tnum_wr);
        CU_ASSERT(rpoller.num_cqe == tnum_cqe);

        /* Test5: RDMA CQ resize succeeds.
         * rsize = MIN(MAX(num_cqe * 2, required_num_wr), device->attr.max_cqe). */
        tnum_wr = rpoller.required_num_wr;
        tnum_cqe = rpoller.num_cqe;
        MOCK_SET(ibv_resize_cq, 0);

        rc = nvmf_rdma_resize_cq(&rqpair, &rdevice);
        CU_ASSERT(rc == 0);
        CU_ASSERT(rpoller.num_cqe == 30);
        CU_ASSERT(rpoller.required_num_wr == 18 + MAX_WR_PER_QP(rqpair.max_queue_depth));
        CU_ASSERT(rpoller.required_num_wr > tnum_wr);
        CU_ASSERT(rpoller.num_cqe > tnum_cqe);
}

int
main(int argc, char **argv)
{
        CU_pSuite suite = NULL;
        unsigned int num_failures;

        CU_initialize_registry();

        suite = CU_add_suite("nvmf", NULL, NULL);

        CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl);
        CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_process);
        CU_ADD_TEST(suite, test_nvmf_rdma_get_optimal_poll_group);
        CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl_with_md);
        CU_ADD_TEST(suite, test_nvmf_rdma_opts_init);
        CU_ADD_TEST(suite, test_nvmf_rdma_request_free_data);
        CU_ADD_TEST(suite, test_nvmf_rdma_update_ibv_state);
        CU_ADD_TEST(suite, test_nvmf_rdma_resources_create);
        CU_ADD_TEST(suite, test_nvmf_rdma_qpair_compare);
        CU_ADD_TEST(suite, test_nvmf_rdma_resize_cq);

        num_failures = spdk_ut_run_tests(argc, argv, NULL);
        CU_cleanup_registry();
        return num_failures;
}