/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019, 2021 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "common/lib/test_rdma.c"
#include "nvmf/rdma.c"
#include "nvmf/transport.c"

#define RDMA_UT_UNITS_IN_MAX_IO 16

struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
	.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
	.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
	.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
	.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
	.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
	.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
	.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
};

SPDK_LOG_REGISTER_COMPONENT(nvmf)
DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size, uint64_t translation), 0);
DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size), 0);
DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
		nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid), 0);
DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));

DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
		struct spdk_nvmf_ctrlr_data *cdata));
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
		struct spdk_dif_ctx *dif_ctx), false);
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

int
ibv_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
	     int attr_mask, struct ibv_qp_init_attr *init_attr)
{
	if (qp == NULL) {
		return -1;
	} else {
		attr->port_num = 80;

		if (qp->state == IBV_QPS_ERR) {
			attr->qp_state = 10;
		} else {
			attr->qp_state = IBV_QPS_INIT;
		}

		return 0;
	}
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* cast official trstring to uppercase version of input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

static void
reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
{
	int i;

	rdma_req->req.length = 0;
	rdma_req->req.data_from_pool = false;
	rdma_req->req.data = NULL;
	rdma_req->data.wr.num_sge = 0;
	rdma_req->data.wr.wr.rdma.remote_addr = 0;
	rdma_req->data.wr.wr.rdma.rkey = 0;
	rdma_req->offset = 0;
	memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));

	for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
		rdma_req->req.iov[i].iov_base = 0;
		rdma_req->req.iov[i].iov_len = 0;
		rdma_req->req.buffers[i] = 0;
		rdma_req->data.wr.sg_list[i].addr = 0;
		rdma_req->data.wr.sg_list[i].length = 0;
		rdma_req->data.wr.sg_list[i].lkey = 0;
	}
	rdma_req->req.iovcnt = 0;
}

static void
test_spdk_nvmf_rdma_request_parse_sgl(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvmf_transport_pg_cache_buf bufs[4];
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	int rc, i;
	uint32_t sgl_length;
	uintptr_t aligned_buffer_address;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;

	device.map = (void *)0x0;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);

	/* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}

	/* Part 3: simple I/O, one SGL larger than the transport max io size */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 4: Pretend there are no buffer pools */
	MOCK_SET(spdk_mempool_get, NULL);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == false);
	CU_ASSERT(rdma_req.req.data == NULL);
	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
	CU_ASSERT(rdma_req.req.buffers[0] == NULL);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);

	rdma_req.recv->buf = (void *)0xDDDD;
	/* Test 2: sgl type: keyed data block subtype: offset (in capsule data) */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;

	/* Part 1: Normal I/O smaller than in capsule data size, no offset */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
	CU_ASSERT(rdma_req.req.data_from_pool == false);

	/* Part 2: I/O offset + length too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = rtransport.transport.opts.in_capsule_data_size;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 3: I/O too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Test 3: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
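	/* With a LAST_SEGMENT/offset SGL the capsule data (recv->buf) is interpreted as an
	 * array of keyed SGL descriptors rather than as payload. spdk_mempool_get is mocked
	 * below to hand back the local 'data' variable, so any additional work request the
	 * parser chains for the extra descriptors is expected to show up as data.wr. */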
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, &data);

	/* part 1: 2 segments each with 1 wr. */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
		sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 2: 2 segments, each with 1 wr containing 8 sge_elements */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
		sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 16);
	CU_ASSERT(rdma_req.data.wr.num_sge == 8);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
	CU_ASSERT(data.wr.num_sge == 8);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 3: 2 segments, one very large, one very small */
	reset_nvmf_rdma_request(&rdma_req);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.key = 0x44;
	}

	sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
				   rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[0].address = 0x4000;
	sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
			      rtransport.transport.opts.io_unit_size / 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 16);
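	/* Expected split: the large first descriptor fills the request's own WR with
	 * 16 SGEs (15 full io units plus the trailing half unit), while the small
	 * second descriptor lands in the chained WR as a single half-unit SGE. */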
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	for (i = 0; i < 15; i++) {
		CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
	}
	CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
		  rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 4: 2 SGL descriptors, each length is transport buffer / 2;
	 * 1 transport buffer should be allocated */
	reset_nvmf_rdma_request(&rdma_req);
	aligned_buffer_address = ((uintptr_t)(&data) + NVMF_DATA_BUFFER_MASK) & ~NVMF_DATA_BUFFER_MASK;
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	sgl_length = rtransport.transport.opts.io_unit_size / 2;
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.length = sgl_length;
		sgl_desc[i].address = 0x4000 + i * sgl_length;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size);
	CU_ASSERT(rdma_req.req.iovcnt == 1);

	CU_ASSERT(rdma_req.data.sgl[0].length == sgl_length);
	/* We mocked mempool_get to return address of data variable. Mempool is used
	 * to get both additional WRs and data buffers, so data points to &data */
	CU_ASSERT(rdma_req.data.sgl[0].addr == aligned_buffer_address);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);

	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + sgl_length);
	CU_ASSERT(data.sgl[0].length == sgl_length);
	CU_ASSERT(data.sgl[0].addr == aligned_buffer_address + sgl_length);
	CU_ASSERT(data.wr.num_sge == 1);

	/* Test 4: use PG buffer cache */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;
	sgl->keyed.key = 0xEEEE;

	for (i = 0; i < 4; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}

	/* part 1: use the four buffers from the pg cache */
	group.group.buf_cache_size = 4;
	group.group.buf_cache_count = 4;
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * 4;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	/* part 2: now that we have used the buffers from the cache, try again. We should get mempool buffers. */
	reset_nvmf_rdma_request(&rdma_req);
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(group.group.buf_cache_count == 0);
	}

	/* part 3: half and half */
	group.group.buf_cache_count = 2;

	for (i = 0; i < 2; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}
	reset_nvmf_rdma_request(&rdma_req);
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	for (i = 0; i < 2; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}
	for (i = 2; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	reset_nvmf_rdma_request(&rdma_req);
}

static struct spdk_nvmf_rdma_recv *
create_recv(struct spdk_nvmf_rdma_qpair *rqpair, enum spdk_nvme_nvm_opcode opc)
{
	struct spdk_nvmf_rdma_recv *rdma_recv;
	union nvmf_h2c_msg *cmd;
	struct spdk_nvme_sgl_descriptor *sgl;

	rdma_recv = calloc(1, sizeof(*rdma_recv));
	rdma_recv->qpair = rqpair;
	cmd = calloc(1, sizeof(*cmd));
	rdma_recv->sgl[0].addr = (uintptr_t)cmd;
	cmd->nvme_cmd.opc = opc;
	sgl = &cmd->nvme_cmd.dptr.sgl1;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->keyed.length = 1;

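	/* The recv element now mimics what the poller would hand to request processing:
	 * its first SGE points at a freshly allocated command whose SGL is a minimal
	 * keyed data block. */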
	return rdma_recv;
}

static void
free_recv(struct spdk_nvmf_rdma_recv *rdma_recv)
{
	free((void *)rdma_recv->sgl[0].addr);
	free(rdma_recv);
}

static struct spdk_nvmf_rdma_request *
create_req(struct spdk_nvmf_rdma_qpair *rqpair,
	   struct spdk_nvmf_rdma_recv *rdma_recv)
{
	struct spdk_nvmf_rdma_request *rdma_req;
	union nvmf_c2h_msg *cpl;

	rdma_req = calloc(1, sizeof(*rdma_req));
	rdma_req->recv = rdma_recv;
	rdma_req->req.qpair = &rqpair->qpair;
	rdma_req->state = RDMA_REQUEST_STATE_NEW;
	rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
	rdma_req->data.wr.sg_list = rdma_req->data.sgl;
	cpl = calloc(1, sizeof(*cpl));
	rdma_req->rsp.sgl[0].addr = (uintptr_t)cpl;
	rdma_req->req.rsp = cpl;

	return rdma_req;
}

static void
free_req(struct spdk_nvmf_rdma_request *rdma_req)
{
	free((void *)rdma_req->rsp.sgl[0].addr);
	free(rdma_req);
}

static void
qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
	    struct spdk_nvmf_rdma_poller *poller,
	    struct spdk_nvmf_rdma_device *device,
	    struct spdk_nvmf_rdma_resources *resources,
	    struct spdk_nvmf_transport *transport)
{
	memset(rqpair, 0, sizeof(*rqpair));
	STAILQ_INIT(&rqpair->pending_rdma_write_queue);
	STAILQ_INIT(&rqpair->pending_rdma_read_queue);
	rqpair->poller = poller;
	rqpair->device = device;
	rqpair->resources = resources;
	rqpair->qpair.qid = 1;
	rqpair->ibv_state = IBV_QPS_RTS;
	rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	rqpair->max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
	rqpair->max_send_depth = 16;
	rqpair->max_read_depth = 16;
	rqpair->qpair.transport = transport;
}

static void
poller_reset(struct spdk_nvmf_rdma_poller *poller,
	     struct spdk_nvmf_rdma_poll_group *group)
{
	memset(poller, 0, sizeof(*poller));
	STAILQ_INIT(&poller->qpairs_pending_recv);
	STAILQ_INIT(&poller->qpairs_pending_send);
	poller->group = group;
}

static void
test_spdk_nvmf_rdma_request_process(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_rdma_poll_group group = {};
	struct spdk_nvmf_rdma_poller poller = {};
	struct spdk_nvmf_rdma_device device = {};
	struct spdk_nvmf_rdma_resources resources = {};
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_rdma_recv *rdma_recv;
	struct spdk_nvmf_rdma_request *rdma_req;
	bool progress;

	STAILQ_INIT(&group.group.buf_cache);
	STAILQ_INIT(&group.group.pending_buf_queue);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.transport.data_buf_pool = spdk_mempool_create("test_data_pool", 16, 128, 0, 0);
	rtransport.data_wr_pool = spdk_mempool_create("test_wr_pool", 128,
				  sizeof(struct spdk_nvmf_rdma_request_data),
				  0, 0);
	MOCK_CLEAR(spdk_mempool_get);

	device.attr.device_cap_flags = 0;
	device.map = (void *)0x0;

	/* Test 1: single SGL READ request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_READ);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> EXECUTING */
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
	/* EXECUTED -> TRANSFERRING_C2H */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(rdma_req->recv == NULL);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

	/* Test 2: single SGL WRITE request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> TRANSFERRING_H2C */
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
	STAILQ_INIT(&poller.qpairs_pending_send);
	/* READY_TO_EXECUTE -> EXECUTING */
	rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	/* EXECUTED -> COMPLETING */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
	CU_ASSERT(rdma_req->recv == NULL);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

	/* Test 3: WRITE+WRITE ibv_send batching */
	{
		struct spdk_nvmf_rdma_recv *recv1, *recv2;
		struct spdk_nvmf_rdma_request *req1, *req2;
		recv1 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req1 = create_req(&rqpair, recv1);
		recv2 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req2 = create_req(&rqpair, recv2);

		/* WRITE 1: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 1;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);

		/* WRITE 2: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 2;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);

		STAILQ_INIT(&poller.qpairs_pending_send);

		/* WRITE 1 completes before WRITE 2 has finished RDMA reading */
		/* WRITE 1: READY_TO_EXECUTE -> EXECUTING */
		req1->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 1: EXECUTED -> COMPLETING */
		req1->state = RDMA_REQUEST_STATE_EXECUTED;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 1: COMPLETED -> FREE */
		req1->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_FREE);

		/* Now WRITE 2 has finished reading and completes */
		/* WRITE 2: READY_TO_EXECUTE -> EXECUTING */
		req2->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 2: EXECUTED -> COMPLETING */
		req2->state = RDMA_REQUEST_STATE_EXECUTED;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 2: COMPLETED -> FREE */
		req2->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_FREE);

		free_recv(recv1);
		free_req(req1);
		free_recv(recv2);
		free_req(req2);
		poller_reset(&poller, &group);
		qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
	}

	/* Test 4: invalid command, check xfer type */
	{
		struct spdk_nvmf_rdma_recv *rdma_recv_inv;
		struct spdk_nvmf_rdma_request *rdma_req_inv;
		/* construct an opcode that specifies BIDIRECTIONAL transfer */
		uint8_t opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;

		rdma_recv_inv = create_recv(&rqpair, opc);
		rdma_req_inv = create_req(&rqpair, rdma_recv_inv);

		/* NEW -> RDMA_REQUEST_STATE_COMPLETING */
		rqpair.current_recv_depth = 1;
		progress = nvmf_rdma_request_process(&rtransport, rdma_req_inv);
		CU_ASSERT(progress == true);
		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_COMPLETING);
		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);

		/* RDMA_REQUEST_STATE_COMPLETED -> FREE */
		rdma_req_inv->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, rdma_req_inv);
		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_FREE);

		free_recv(rdma_recv_inv);
		free_req(rdma_req_inv);
		poller_reset(&poller, &group);
		qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
	}

	spdk_mempool_free(rtransport.transport.data_buf_pool);
	spdk_mempool_free(rtransport.data_wr_pool);
}

#define TEST_GROUPS_COUNT 5
static void
test_nvmf_rdma_get_optimal_poll_group(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_transport *transport = &rtransport.transport;
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_transport_poll_group *result;
	uint32_t i;

	rqpair.qpair.transport = transport;
	pthread_mutex_init(&rtransport.lock, NULL);
	TAILQ_INIT(&rtransport.poll_groups);

	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		groups[i] = nvmf_rdma_poll_group_create(transport);
		CU_ASSERT(groups[i] != NULL);
		rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
		groups[i]->transport = transport;
	}
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[0]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[0]);

	/* Emulate connection of %TEST_GROUPS_COUNT% initiators - each creates 1 admin and 1 io qp */
	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		rqpair.qpair.qid = 0;
		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i]);

		rqpair.qpair.qid = 1;
		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
	}
	/* Wrap around: the admin/io pg pointers point to the first pg again.
	 * Destroy all poll groups except the last one */
	for (i = 0; i < TEST_GROUPS_COUNT - 1; i++) {
		nvmf_rdma_poll_group_destroy(groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[i + 1]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i + 1]);
	}

	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Check that pointers to the next admin/io poll groups are not changed */
	rqpair.qpair.qid = 0;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	rqpair.qpair.qid = 1;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Remove the last poll group, check that pointers are NULL */
	nvmf_rdma_poll_group_destroy(groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == NULL);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == NULL);

	/* Request optimal poll group, result must be NULL */
	rqpair.qpair.qid = 0;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	rqpair.qpair.qid = 1;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	pthread_mutex_destroy(&rtransport.lock);
}
#undef TEST_GROUPS_COUNT

static void
test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	char data_buffer[8192];
	struct spdk_nvmf_rdma_request_data *data = (struct spdk_nvmf_rdma_request_data *)data_buffer;
	char data2_buffer[8192];
	struct spdk_nvmf_rdma_request_data *data2 = (struct spdk_nvmf_rdma_request_data *)data2_buffer;
	const uint32_t data_bs = 512;
	const uint32_t md_size = 8;
	int rc, i;
	void *aligned_buffer;

	data->wr.sg_list = data->sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	device.map = NULL;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size, block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif_enabled = true;
	rtransport.transport.opts.io_unit_size = data_bs * 8;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}

	/* Part 2: simple I/O, one SGL equal to io unit size, io_unit_size is not aligned with md_size,
	 * block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif_enabled = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 5);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == RDMA_UT_LKEY);

	/* 2nd buffer consumed */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == RDMA_UT_LKEY);

	/* Part 3: simple I/O, one SGL equal to io unit size, io_unit_size is equal to block size 512 bytes */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif_enabled = true;
	rtransport.transport.opts.io_unit_size = data_bs;
	sgl->keyed.length = data_bs;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == data_bs + md_size);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);

	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[0].iov_len == data_bs);
	/* 2nd buffer consumed for metadata */
	CU_ASSERT(rdma_req.req.iov[1].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[1].iov_len == md_size);

	/* Part 4: simple I/O, one SGL equal to io unit size, io_unit_size is aligned with md_size,
	 * block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif_enabled = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}

	/* Part 5: simple I/O, one SGL equal to 2x io unit size, io_unit_size is aligned with md_size,
	 * block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif_enabled = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 2;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}
	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].length == data_bs);
	}

	/* Part 6: simple I/O, one SGL larger than the transport io unit size, io_unit_size is not aligned to md_size,
	 * block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif_enabled = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 6;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 6);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 6);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 7);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
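	/* io_unit_size (2048) is not a multiple of the extended block (512 + 8), so only
	 * 2048 - 3 * 520 = 488 bytes of the 4th data block fit in the first buffer; the
	 * remaining 24 data bytes spill over to the start of the next buffer. */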
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == RDMA_UT_LKEY);

	/* 2nd IO buffer consumed */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == RDMA_UT_LKEY);

	CU_ASSERT(rdma_req.data.wr.sg_list[5].addr == 0x2000 + 24 + md_size);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].lkey == RDMA_UT_LKEY);

	CU_ASSERT(rdma_req.data.wr.sg_list[6].addr == 0x2000 + 24 + 512 + md_size * 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].lkey == RDMA_UT_LKEY);

	/* Part 7: simple I/O, number of SGL entries exceeds the number of entries
	 * one WR can hold. Additional WR is chained */
	MOCK_SET(spdk_mempool_get, data2_buffer);
	aligned_buffer = (void *)((uintptr_t)(data2_buffer + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif_enabled = true;
	rtransport.transport.opts.io_unit_size = data_bs * 16;
	sgl->keyed.length = data_bs * 16;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 16);
	CU_ASSERT(rdma_req.req.data == aligned_buffer);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);

	for (i = 0; i < 15; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)aligned_buffer + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}

	/* 8192 - (512 + 8) * 15 = 392 */
	CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)aligned_buffer + i * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[i].length == 392);
	CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);

	/* additional wr from pool */
	CU_ASSERT(rdma_req.data.wr.next == (void *)&data2->wr);
	CU_ASSERT(rdma_req.data.wr.next->num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.next->next == &rdma_req.rsp.wr);
	/* 2nd IO buffer */
	CU_ASSERT(data2->wr.sg_list[0].addr == (uintptr_t)aligned_buffer);
	CU_ASSERT(data2->wr.sg_list[0].length == 120);
	CU_ASSERT(data2->wr.sg_list[0].lkey == RDMA_UT_LKEY);

	/* Part 8: simple I/O, data with metadata does not fit into one IO buffer */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif_enabled = true;
	rtransport.transport.opts.io_unit_size = 516;
	sgl->keyed.length = data_bs * 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

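	/* io_unit_size (516) is smaller than one data block plus its metadata (520), so
	 * every block spans two buffers: two blocks are expected to need three iovecs,
	 * with the second data block starting 4 bytes into the second buffer. */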
	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 2);
	CU_ASSERT(rdma_req.req.iovcnt == 3);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 2);
	CU_ASSERT(rdma_req.req.data == (void *)0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);

	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);

	/* 2nd IO buffer consumed, offset 4 bytes because part of the metadata
	 * is located at the beginning of that buffer */
	CU_ASSERT(rdma_req.data.wr.sg_list[1].addr == 0x2000 + 4);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].lkey == RDMA_UT_LKEY);

	/* Test 2: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, data_buffer);
	aligned_buffer = (void *)((uintptr_t)(data_buffer + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);

	/* part 1: 2 segments each with 1 wr. io_unit_size is aligned with data_bs + md_size */
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1,
			  SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK, 0, 0, 0, 0, 0);
	rdma_req.req.dif_enabled = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);

	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = data_bs * 4;
		sgl_desc[i].address = 0x4000 + i * data_bs * 4;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4 * 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4 * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
			  (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data->wr);
	CU_ASSERT(data->wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data->wr.wr.rdma.remote_addr == 0x4000 + data_bs * 4);
	CU_ASSERT(data->wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(data->wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
			  (data_bs + md_size));
		CU_ASSERT(data->wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(data->wr.next == &rdma_req.rsp.wr);
}

static void
test_nvmf_rdma_opts_init(void)
{
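	/* Verify that nvmf_rdma_opts_init() fills spdk_nvmf_transport_opts with the RDMA
	 * transport defaults checked below. */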
	struct spdk_nvmf_transport_opts opts = {};

	nvmf_rdma_opts_init(&opts);
	CU_ASSERT(opts.max_queue_depth == SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH);
	CU_ASSERT(opts.max_qpairs_per_ctrlr == SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR);
	CU_ASSERT(opts.in_capsule_data_size == SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(opts.max_io_size == SPDK_NVMF_RDMA_DEFAULT_MAX_IO_SIZE);
	CU_ASSERT(opts.io_unit_size == SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE);
	CU_ASSERT(opts.max_aq_depth == SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH);
	CU_ASSERT(opts.num_shared_buffers == SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS);
	CU_ASSERT(opts.buf_cache_size == SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE);
	CU_ASSERT(opts.dif_insert_or_strip == SPDK_NVMF_RDMA_DIF_INSERT_OR_STRIP);
	CU_ASSERT(opts.abort_timeout_sec == SPDK_NVMF_RDMA_DEFAULT_ABORT_TIMEOUT_SEC);
	CU_ASSERT(opts.transport_specific == NULL);
}

static void
test_nvmf_rdma_request_free_data(void)
{
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_rdma_request_data *next_request_data = NULL;

	MOCK_CLEAR(spdk_mempool_get);
	rtransport.data_wr_pool = spdk_mempool_create("spdk_nvmf_rdma_wr_data",
				  SPDK_NVMF_MAX_SGL_ENTRIES,
				  sizeof(struct spdk_nvmf_rdma_request_data),
				  SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
				  SPDK_ENV_SOCKET_ID_ANY);
	next_request_data = spdk_mempool_get(rtransport.data_wr_pool);
	SPDK_CU_ASSERT_FATAL(((struct test_mempool *)rtransport.data_wr_pool)->count ==
			     SPDK_NVMF_MAX_SGL_ENTRIES - 1);
	next_request_data->wr.wr_id = 1;
	next_request_data->wr.num_sge = 2;
	next_request_data->wr.next = NULL;
	rdma_req.data.wr.next = &next_request_data->wr;
	rdma_req.data.wr.wr_id = 1;
	rdma_req.data.wr.num_sge = 2;

	nvmf_rdma_request_free_data(&rdma_req, &rtransport);
	/* Check that next_request_data was returned to the memory pool */
	CU_ASSERT(((struct test_mempool *)rtransport.data_wr_pool)->count == SPDK_NVMF_MAX_SGL_ENTRIES);
	CU_ASSERT(rdma_req.data.wr.num_sge == 0);

	spdk_mempool_free(rtransport.data_wr_pool);
}

static void
test_nvmf_rdma_update_ibv_state(void)
{
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_rdma_qp rdma_qp = {};
	struct ibv_qp qp = {};
	int rc = 0;

	rqpair.rdma_qp = &rdma_qp;

	/* Case 1: Failed to get updated RDMA queue pair state */
	rqpair.ibv_state = IBV_QPS_INIT;
	rqpair.rdma_qp->qp = NULL;

	rc = nvmf_rdma_update_ibv_state(&rqpair);
	CU_ASSERT(rc == IBV_QPS_ERR + 1);

	/* Case 2: Bad state updated */
	rqpair.rdma_qp->qp = &qp;
	qp.state = IBV_QPS_ERR;
	rc = nvmf_rdma_update_ibv_state(&rqpair);
	CU_ASSERT(rqpair.ibv_state == 10);
	CU_ASSERT(rc == IBV_QPS_ERR + 1);

	/* Case 3: Pass */
	qp.state = IBV_QPS_INIT;
	rc = nvmf_rdma_update_ibv_state(&rqpair);
	CU_ASSERT(rqpair.ibv_state == IBV_QPS_INIT);
	CU_ASSERT(rc == IBV_QPS_INIT);
}

int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl);
	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_process);
	CU_ADD_TEST(suite, test_nvmf_rdma_get_optimal_poll_group);
	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl_with_md);
	CU_ADD_TEST(suite, test_nvmf_rdma_opts_init);
	CU_ADD_TEST(suite, test_nvmf_rdma_request_free_data);
	CU_ADD_TEST(suite, test_nvmf_rdma_update_ibv_state);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}