/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019, 2021 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "common/lib/test_rdma.c"
#include "nvmf/rdma.c"
#include "nvmf/transport.c"

#define RDMA_UT_UNITS_IN_MAX_IO 16

struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
	.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
	.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
	.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
	.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
	.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
	.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
	.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
};

SPDK_LOG_REGISTER_COMPONENT(nvmf)
DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size, uint64_t translation), 0);
DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size), 0);
DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
		nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid), 0);
DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));

DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
		struct spdk_nvmf_ctrlr_data *cdata));
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
		struct spdk_dif_ctx *dif_ctx), false);
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);

int ibv_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
		 int attr_mask, struct ibv_qp_init_attr *init_attr)
{
	if (qp == NULL) {
		return -1;
	} else {
		attr->port_num = 80;

		if (qp->state == IBV_QPS_ERR) {
			attr->qp_state = 10;
		} else {
			attr->qp_state = IBV_QPS_INIT;
		}

		return 0;
	}
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* cast official trstring to uppercase version of input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

static void reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
{
	int i;

	rdma_req->req.length = 0;
	rdma_req->req.data_from_pool = false;
	rdma_req->req.data = NULL;
	rdma_req->data.wr.num_sge = 0;
	rdma_req->data.wr.wr.rdma.remote_addr = 0;
	rdma_req->data.wr.wr.rdma.rkey = 0;
	rdma_req->offset = 0;
	memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));

	for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
		rdma_req->req.iov[i].iov_base = 0;
		rdma_req->req.iov[i].iov_len = 0;
		rdma_req->req.buffers[i] = 0;
		rdma_req->data.wr.sg_list[i].addr = 0;
		rdma_req->data.wr.sg_list[i].length = 0;
		rdma_req->data.wr.sg_list[i].lkey = 0;
	}
	rdma_req->req.iovcnt = 0;
}

static void
test_spdk_nvmf_rdma_request_parse_sgl(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvmf_transport_pg_cache_buf bufs[4];
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	int rc, i;
	uint32_t sgl_length;
	uintptr_t aligned_buffer_address;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	poller.group = &group;
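	/* Wire the qpair to its poller and poll group before building the request;
	 * the SGL parsing code reaches the group's buffer cache through this chain. */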
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;

	device.map = (void *)0x0;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);

	/* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}

	/* Part 3: simple I/O one SGL larger than the transport max io size */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 4: Pretend there are no buffer pools */
	MOCK_SET(spdk_mempool_get, NULL);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == false);
	CU_ASSERT(rdma_req.req.data == NULL);
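	/* No buffer could be taken from the pool, so no SGEs should have been filled in. */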
	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
	CU_ASSERT(rdma_req.req.buffers[0] == NULL);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);

	rdma_req.recv->buf = (void *)0xDDDD;
	/* Test 2: sgl type: keyed data block subtype: offset (in capsule data) */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;

	/* Part 1: Normal I/O smaller than in capsule data size no offset */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
	CU_ASSERT(rdma_req.req.data_from_pool == false);

	/* Part 2: I/O offset + length too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = rtransport.transport.opts.in_capsule_data_size;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 3: I/O too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Test 3: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, &data);

	/* part 1: 2 segments each with 1 wr. */
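	/* spdk_mempool_get is mocked to return &data above, so the WR chained for the
	 * second descriptor is expected to land in the local 'data' structure. */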
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
		sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 2: 2 segments, each with 1 wr containing 8 sge_elements */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
		sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 16);
	CU_ASSERT(rdma_req.data.wr.num_sge == 8);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
	CU_ASSERT(data.wr.num_sge == 8);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 3: 2 segments, one very large, one very small */
	reset_nvmf_rdma_request(&rdma_req);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.key = 0x44;
	}

	sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
				   rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[0].address = 0x4000;
	sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
			      rtransport.transport.opts.io_unit_size / 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 16);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	for (i = 0; i < 15; i++) {
		CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
	}
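	/* The 16th SGE holds the trailing half io_unit of the first (large) descriptor. */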
	CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
		  rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 4: 2 SGL descriptors, each with length equal to half of the transport buffer;
	 * one transport buffer should be allocated */
	reset_nvmf_rdma_request(&rdma_req);
	aligned_buffer_address = ((uintptr_t)(&data) + NVMF_DATA_BUFFER_MASK) & ~NVMF_DATA_BUFFER_MASK;
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	sgl_length = rtransport.transport.opts.io_unit_size / 2;
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.length = sgl_length;
		sgl_desc[i].address = 0x4000 + i * sgl_length;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size);
	CU_ASSERT(rdma_req.req.iovcnt == 1);

	CU_ASSERT(rdma_req.data.sgl[0].length == sgl_length);
	/* We mocked mempool_get to return address of data variable. Mempool is used
	 * to get both additional WRs and data buffers, so data points to &data */
	CU_ASSERT(rdma_req.data.sgl[0].addr == aligned_buffer_address);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);

	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + sgl_length);
	CU_ASSERT(data.sgl[0].length == sgl_length);
	CU_ASSERT(data.sgl[0].addr == aligned_buffer_address + sgl_length);
	CU_ASSERT(data.wr.num_sge == 1);

	/* Test 4: use PG buffer cache */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;
	sgl->keyed.key = 0xEEEE;

	for (i = 0; i < 4; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}

	/* part 1: use the four buffers from the pg cache */
	group.group.buf_cache_size = 4;
	group.group.buf_cache_count = 4;
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * 4;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	/* part 2: now that we have used the buffers from the cache, try again. We should get mempool buffers. */
	reset_nvmf_rdma_request(&rdma_req);
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(group.group.buf_cache_count == 0);
	}

	/* part 3: half and half */
	group.group.buf_cache_count = 2;

	for (i = 0; i < 2; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}
	reset_nvmf_rdma_request(&rdma_req);
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	for (i = 0; i < 2; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}
	for (i = 2; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	reset_nvmf_rdma_request(&rdma_req);
}

static struct spdk_nvmf_rdma_recv *
create_recv(struct spdk_nvmf_rdma_qpair *rqpair, enum spdk_nvme_nvm_opcode opc)
{
	struct spdk_nvmf_rdma_recv *rdma_recv;
	union nvmf_h2c_msg *cmd;
	struct spdk_nvme_sgl_descriptor *sgl;

	rdma_recv = calloc(1, sizeof(*rdma_recv));
	rdma_recv->qpair = rqpair;
	cmd = calloc(1, sizeof(*cmd));
	rdma_recv->sgl[0].addr = (uintptr_t)cmd;
	cmd->nvme_cmd.opc = opc;
	sgl = &cmd->nvme_cmd.dptr.sgl1;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->keyed.length = 1;

	return rdma_recv;
}

static void
free_recv(struct spdk_nvmf_rdma_recv *rdma_recv)
{
	free((void *)rdma_recv->sgl[0].addr);
	free(rdma_recv);
}

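/* Build a minimal NEW-state request attached to the given recv; the response SGE points
 * at a heap-allocated completion so nvmf_rdma_request_process() can fill in a status. */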
static struct spdk_nvmf_rdma_request *
create_req(struct spdk_nvmf_rdma_qpair *rqpair,
	   struct spdk_nvmf_rdma_recv *rdma_recv)
{
	struct spdk_nvmf_rdma_request *rdma_req;
	union nvmf_c2h_msg *cpl;

	rdma_req = calloc(1, sizeof(*rdma_req));
	rdma_req->recv = rdma_recv;
	rdma_req->req.qpair = &rqpair->qpair;
	rdma_req->state = RDMA_REQUEST_STATE_NEW;
	rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
	rdma_req->data.wr.sg_list = rdma_req->data.sgl;
	cpl = calloc(1, sizeof(*cpl));
	rdma_req->rsp.sgl[0].addr = (uintptr_t)cpl;
	rdma_req->req.rsp = cpl;

	return rdma_req;
}

static void
free_req(struct spdk_nvmf_rdma_request *rdma_req)
{
	free((void *)rdma_req->rsp.sgl[0].addr);
	free(rdma_req);
}

static void
qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
	    struct spdk_nvmf_rdma_poller *poller,
	    struct spdk_nvmf_rdma_device *device,
	    struct spdk_nvmf_rdma_resources *resources,
	    struct spdk_nvmf_transport *transport)
{
	memset(rqpair, 0, sizeof(*rqpair));
	STAILQ_INIT(&rqpair->pending_rdma_write_queue);
	STAILQ_INIT(&rqpair->pending_rdma_read_queue);
	rqpair->poller = poller;
	rqpair->device = device;
	rqpair->resources = resources;
	rqpair->qpair.qid = 1;
	rqpair->ibv_state = IBV_QPS_RTS;
	rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	rqpair->max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
	rqpair->max_send_depth = 16;
	rqpair->max_read_depth = 16;
	rqpair->qpair.transport = transport;
}

static void
poller_reset(struct spdk_nvmf_rdma_poller *poller,
	     struct spdk_nvmf_rdma_poll_group *group)
{
	memset(poller, 0, sizeof(*poller));
	STAILQ_INIT(&poller->qpairs_pending_recv);
	STAILQ_INIT(&poller->qpairs_pending_send);
	poller->group = group;
}

static void
test_spdk_nvmf_rdma_request_process(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_rdma_poll_group group = {};
	struct spdk_nvmf_rdma_poller poller = {};
	struct spdk_nvmf_rdma_device device = {};
	struct spdk_nvmf_rdma_resources resources = {};
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_rdma_recv *rdma_recv;
	struct spdk_nvmf_rdma_request *rdma_req;
	bool progress;

	STAILQ_INIT(&group.group.buf_cache);
	STAILQ_INIT(&group.group.pending_buf_queue);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.transport.data_buf_pool = spdk_mempool_create("test_data_pool", 16, 128, 0, 0);
	rtransport.data_wr_pool = spdk_mempool_create("test_wr_pool", 128,
				  sizeof(struct spdk_nvmf_rdma_request_data),
				  0, 0);
	MOCK_CLEAR(spdk_mempool_get);

	device.attr.device_cap_flags = 0;
	device.map = (void *)0x0;

	/* Test 1: single SGL READ request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_READ);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> EXECUTING */
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
	/* EXECUTED -> TRANSFERRING_C2H */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(rdma_req->recv == NULL);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

	/* Test 2: single SGL WRITE request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> TRANSFERRING_H2C */
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
	STAILQ_INIT(&poller.qpairs_pending_send);
	/* READY_TO_EXECUTE -> EXECUTING */
	rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	/* EXECUTED -> COMPLETING */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
	CU_ASSERT(rdma_req->recv == NULL);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

	/* Test 3: WRITE+WRITE ibv_send batching */
	{
		struct spdk_nvmf_rdma_recv *recv1, *recv2;
		struct spdk_nvmf_rdma_request *req1, *req2;
		recv1 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req1 = create_req(&rqpair, recv1);
		recv2 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req2 = create_req(&rqpair, recv2);

		/* WRITE 1: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 1;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);

		/* WRITE 2: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 2;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);

		STAILQ_INIT(&poller.qpairs_pending_send);

		/* WRITE 1 completes before WRITE 2 has finished RDMA reading */
		/* WRITE 1: READY_TO_EXECUTE -> EXECUTING */
		req1->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 1: EXECUTED -> COMPLETING */
		req1->state = RDMA_REQUEST_STATE_EXECUTED;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 1: COMPLETED -> FREE */
		req1->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_FREE);

		/* Now WRITE 2 has finished reading and completes */
		/* WRITE 2: READY_TO_EXECUTE -> EXECUTING */
		req2->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 2: EXECUTED -> COMPLETING */
		req2->state = RDMA_REQUEST_STATE_EXECUTED;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 2: COMPLETED -> FREE */
		req2->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_FREE);

		free_recv(recv1);
		free_req(req1);
		free_recv(recv2);
		free_req(req2);
		poller_reset(&poller, &group);
		qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
	}

	/* Test 4, invalid command, check xfer type */
	{
		struct spdk_nvmf_rdma_recv *rdma_recv_inv;
		struct spdk_nvmf_rdma_request *rdma_req_inv;
		/* construct an opcode that specifies BIDIRECTIONAL transfer */
		uint8_t opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;

		rdma_recv_inv = create_recv(&rqpair, opc);
		rdma_req_inv = create_req(&rqpair, rdma_recv_inv);

		/* NEW -> RDMA_REQUEST_STATE_COMPLETING */
		rqpair.current_recv_depth = 1;
		progress = nvmf_rdma_request_process(&rtransport, rdma_req_inv);
		CU_ASSERT(progress == true);
		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_COMPLETING);
		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);

		/* RDMA_REQUEST_STATE_COMPLETED -> FREE */
		rdma_req_inv->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, rdma_req_inv);
		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_FREE);

		free_recv(rdma_recv_inv);
		free_req(rdma_req_inv);
		poller_reset(&poller, &group);
		qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
	}

	spdk_mempool_free(rtransport.transport.data_buf_pool);
	spdk_mempool_free(rtransport.data_wr_pool);
}

#define TEST_GROUPS_COUNT 5
static void
test_nvmf_rdma_get_optimal_poll_group(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_transport *transport = &rtransport.transport;
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_transport_poll_group *result;
	uint32_t i;

	rqpair.qpair.transport = transport;
	pthread_mutex_init(&rtransport.lock, NULL);
	TAILQ_INIT(&rtransport.poll_groups);

	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		groups[i] = nvmf_rdma_poll_group_create(transport);
		CU_ASSERT(groups[i] != NULL);
		rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
		groups[i]->transport = transport;
	}
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[0]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[0]);

	/* Emulate connection of %TEST_GROUPS_COUNT% initiators - each creates 1 admin and 1 io qp */
	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
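		/* The admin qpair (qid 0) advances only next_admin_pg; the io qpair (qid 1)
		 * that follows then advances next_io_pg as well. */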
		rqpair.qpair.qid = 0;
		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i]);

		rqpair.qpair.qid = 1;
		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
	}
	/* wrap around, admin/io pg point to the first pg
	   Destroy all poll groups except the last one */
	for (i = 0; i < TEST_GROUPS_COUNT - 1; i++) {
		nvmf_rdma_poll_group_destroy(groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[i + 1]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i + 1]);
	}

	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Check that pointers to the next admin/io poll groups are not changed */
	rqpair.qpair.qid = 0;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	rqpair.qpair.qid = 1;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Remove the last poll group, check that pointers are NULL */
	nvmf_rdma_poll_group_destroy(groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == NULL);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == NULL);

	/* Request optimal poll group, result must be NULL */
	rqpair.qpair.qid = 0;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	rqpair.qpair.qid = 1;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	pthread_mutex_destroy(&rtransport.lock);
}
#undef TEST_GROUPS_COUNT

static void
test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	char data_buffer[8192];
	struct spdk_nvmf_rdma_request_data *data = (struct spdk_nvmf_rdma_request_data *)data_buffer;
	char data2_buffer[8192];
	struct spdk_nvmf_rdma_request_data *data2 = (struct spdk_nvmf_rdma_request_data *)data2_buffer;
	const uint32_t data_bs = 512;
	const uint32_t md_size = 8;
	int rc, i;
	void *aligned_buffer;

	data->wr.sg_list = data->sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	device.map = NULL;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size, block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif_enabled = true;
	rtransport.transport.opts.io_unit_size = data_bs * 8;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}

	/* Part 2: simple I/O, one SGL equal to io unit size, io_unit_size is not aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif_enabled = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 5);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == RDMA_UT_LKEY);

	/* 2nd buffer consumed */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == RDMA_UT_LKEY);

	/* Part 3: simple I/O, one SGL equal io unit size, io_unit_size is equal to block size 512 bytes */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif_enabled = true;
	rtransport.transport.opts.io_unit_size = data_bs;
	sgl->keyed.length = data_bs;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == data_bs + md_size);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);

	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[0].iov_len == data_bs);
	/* 2nd buffer consumed for metadata */
	CU_ASSERT(rdma_req.req.iov[1].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[1].iov_len == md_size);

	/* Part 4: simple I/O, one SGL equal io unit size, io_unit_size is aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif_enabled = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}

	/* Part 5: simple I/O, one SGL equal to 2x io unit size, io_unit_size is aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif_enabled = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 2;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}
	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].length == data_bs);
	}

	/* Part 6: simple I/O, one SGL larger than the transport io unit size, io_unit_size is not aligned to md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif_enabled = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 6;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 6);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 6);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 7);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == RDMA_UT_LKEY);

	/* 2nd IO buffer consumed */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
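	/* Block 3 was split: 488 bytes fit at the end of the first 2048-byte buffer and
	 * the remaining 24 bytes start the second one. */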
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == RDMA_UT_LKEY);

	CU_ASSERT(rdma_req.data.wr.sg_list[5].addr == 0x2000 + 24 + md_size);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].lkey == RDMA_UT_LKEY);

	CU_ASSERT(rdma_req.data.wr.sg_list[6].addr == 0x2000 + 24 + 512 + md_size * 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].lkey == RDMA_UT_LKEY);

	/* Part 7: simple I/O, number of SGL entries exceeds the number of entries
	   one WR can hold. Additional WR is chained */
	MOCK_SET(spdk_mempool_get, data2_buffer);
	aligned_buffer = (void *)((uintptr_t)(data2_buffer + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif_enabled = true;
	rtransport.transport.opts.io_unit_size = data_bs * 16;
	sgl->keyed.length = data_bs * 16;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 16);
	CU_ASSERT(rdma_req.req.data == aligned_buffer);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);

	for (i = 0; i < 15; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)aligned_buffer + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}

	/* 8192 - (512 + 8) * 15 = 392 */
	CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)aligned_buffer + i * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[i].length == 392);
	CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);

	/* additional wr from pool */
	CU_ASSERT(rdma_req.data.wr.next == (void *)&data2->wr);
	CU_ASSERT(rdma_req.data.wr.next->num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.next->next == &rdma_req.rsp.wr);
	/* 2nd IO buffer */
	CU_ASSERT(data2->wr.sg_list[0].addr == (uintptr_t)aligned_buffer);
	CU_ASSERT(data2->wr.sg_list[0].length == 120);
	CU_ASSERT(data2->wr.sg_list[0].lkey == RDMA_UT_LKEY);

	/* Part 8: simple I/O, data with metadata do not fit into a single io_buffer */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif_enabled = true;
	rtransport.transport.opts.io_unit_size = 516;
	sgl->keyed.length = data_bs * 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 2);
	CU_ASSERT(rdma_req.req.iovcnt == 3);
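	/* Three 516-byte buffers are needed because 1040 bytes of data plus interleaved
	 * metadata do not fit into two of them. */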
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 2);
	CU_ASSERT(rdma_req.req.data == (void *)0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);

	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);

	/* 2nd IO buffer consumed; its data SGE starts at a 4-byte offset because part of
	   the metadata is located at the beginning of that buffer */
	CU_ASSERT(rdma_req.data.wr.sg_list[1].addr == 0x2000 + 4);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].lkey == RDMA_UT_LKEY);

	/* Test 2: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, data_buffer);
	aligned_buffer = (void *)((uintptr_t)(data_buffer + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);

	/* part 1: 2 segments each with 1 wr. io_unit_size is aligned with data_bs + md_size */
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1,
			  SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK, 0, 0, 0, 0, 0);
	rdma_req.req.dif_enabled = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);

	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = data_bs * 4;
		sgl_desc[i].address = 0x4000 + i * data_bs * 4;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4 * 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4 * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
			  (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data->wr);
	CU_ASSERT(data->wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data->wr.wr.rdma.remote_addr == 0x4000 + data_bs * 4);
	CU_ASSERT(data->wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(data->wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
			  (data_bs + md_size));
		CU_ASSERT(data->wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(data->wr.next == &rdma_req.rsp.wr);
}

static void
test_nvmf_rdma_opts_init(void)
{
	struct spdk_nvmf_transport_opts opts = {};

	nvmf_rdma_opts_init(&opts);
	CU_ASSERT(opts.max_queue_depth == SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH);
	CU_ASSERT(opts.max_qpairs_per_ctrlr == SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR);
	CU_ASSERT(opts.in_capsule_data_size == SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(opts.max_io_size == SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE);
	CU_ASSERT(opts.io_unit_size == SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE);
	CU_ASSERT(opts.max_aq_depth == SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH);
	CU_ASSERT(opts.num_shared_buffers == SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS);
	CU_ASSERT(opts.buf_cache_size == SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE);
	CU_ASSERT(opts.dif_insert_or_strip == SPDK_NVMF_RDMA_DIF_INSERT_OR_STRIP);
	CU_ASSERT(opts.abort_timeout_sec == SPDK_NVMF_RDMA_DEFAULT_ABORT_TIMEOUT_SEC);
	CU_ASSERT(opts.transport_specific == NULL);
}

static void
test_nvmf_rdma_request_free_data(void)
{
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_rdma_request_data *next_request_data = NULL;

	MOCK_CLEAR(spdk_mempool_get);
	rtransport.data_wr_pool = spdk_mempool_create("spdk_nvmf_rdma_wr_data",
				  SPDK_NVMF_MAX_SGL_ENTRIES,
				  sizeof(struct spdk_nvmf_rdma_request_data),
				  SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
				  SPDK_ENV_SOCKET_ID_ANY);
	next_request_data = spdk_mempool_get(rtransport.data_wr_pool);
	SPDK_CU_ASSERT_FATAL(((struct test_mempool *)rtransport.data_wr_pool)->count ==
			     SPDK_NVMF_MAX_SGL_ENTRIES - 1);
	next_request_data->wr.wr_id = 1;
	next_request_data->wr.num_sge = 2;
	next_request_data->wr.next = NULL;
	rdma_req.data.wr.next = &next_request_data->wr;
	rdma_req.data.wr.wr_id = 1;
	rdma_req.data.wr.num_sge = 2;

	nvmf_rdma_request_free_data(&rdma_req, &rtransport);
	/* Check that next_request_data was returned to the memory pool */
	CU_ASSERT(((struct test_mempool *)rtransport.data_wr_pool)->count == SPDK_NVMF_MAX_SGL_ENTRIES);
	CU_ASSERT(rdma_req.data.wr.num_sge == 0);

	spdk_mempool_free(rtransport.data_wr_pool);
}

static void
test_nvmf_rdma_update_ibv_state(void)
{
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_rdma_qp rdma_qp = {};
	struct ibv_qp qp = {};
	int rc = 0;

	rqpair.rdma_qp = &rdma_qp;

	/* Case 1: Failed to get updated RDMA queue pair state */
	rqpair.ibv_state = IBV_QPS_INIT;
	rqpair.rdma_qp->qp = NULL;

	rc = nvmf_rdma_update_ibv_state(&rqpair);
	CU_ASSERT(rc == IBV_QPS_ERR + 1);

	/* Case 2: Bad state updated */
	rqpair.rdma_qp->qp = &qp;
	qp.state = IBV_QPS_ERR;
	rc = nvmf_rdma_update_ibv_state(&rqpair);
	CU_ASSERT(rqpair.ibv_state == 10);
	CU_ASSERT(rc == IBV_QPS_ERR + 1);

	/* Case 3: Pass */
	qp.state = IBV_QPS_INIT;
	rc = nvmf_rdma_update_ibv_state(&rqpair);
	CU_ASSERT(rqpair.ibv_state == IBV_QPS_INIT);
	CU_ASSERT(rc == IBV_QPS_INIT);
}

int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl);
	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_process);
	CU_ADD_TEST(suite, test_nvmf_rdma_get_optimal_poll_group);
	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl_with_md);
	CU_ADD_TEST(suite, test_nvmf_rdma_opts_init);
	CU_ADD_TEST(suite, test_nvmf_rdma_request_free_data);
	CU_ADD_TEST(suite, test_nvmf_rdma_update_ibv_state);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}