/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "common/lib/test_rdma.c"
#include "nvmf/rdma.c"
#include "nvmf/transport.c"

uint64_t g_mr_size;
uint64_t g_mr_next_size;
struct ibv_mr g_rdma_mr;

#define RDMA_UT_UNITS_IN_MAX_IO 16

struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
	.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
	.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
	.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
	.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
	.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
	.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
	.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
};

SPDK_LOG_REGISTER_COMPONENT(nvmf)
DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size, uint64_t translation), 0);
DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size), 0);
DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
		nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type, uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
		uint32_t size, uint64_t object_id, uint64_t arg1));

DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
		struct spdk_nvmf_ctrlr_data *cdata));
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
		struct spdk_dif_ctx *dif_ctx), false);
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Set the official trstring to an uppercase copy of the input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

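/*
 * Mock memory-map translation. Every lookup resolves to g_rdma_mr, so SGE
 * lkeys can be checked against g_rdma_mr.lkey. When g_mr_size is non-zero it
 * is reported as the size of the translated region, which lets tests simulate
 * a buffer that spans two memory regions; g_mr_next_size, if set, becomes the
 * size reported on subsequent lookups.
 */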
uint64_t
spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
{
	if (g_mr_size != 0) {
		*(uint32_t *)size = g_mr_size;
		if (g_mr_next_size != 0) {
			g_mr_size = g_mr_next_size;
		}
	}

	return (uint64_t)&g_rdma_mr;
}

static void reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
{
	int i;

	rdma_req->req.length = 0;
	rdma_req->req.data_from_pool = false;
	rdma_req->req.data = NULL;
	rdma_req->data.wr.num_sge = 0;
	rdma_req->data.wr.wr.rdma.remote_addr = 0;
	rdma_req->data.wr.wr.rdma.rkey = 0;
	memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));

	for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
		rdma_req->req.iov[i].iov_base = 0;
		rdma_req->req.iov[i].iov_len = 0;
		rdma_req->req.buffers[i] = 0;
		rdma_req->data.wr.sg_list[i].addr = 0;
		rdma_req->data.wr.sg_list[i].length = 0;
		rdma_req->data.wr.sg_list[i].lkey = 0;
	}
	rdma_req->req.iovcnt = 0;
}

static void
test_spdk_nvmf_rdma_request_parse_sgl(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvmf_transport_pg_cache_buf bufs[4];
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	struct spdk_nvmf_transport_pg_cache_buf buffer;
	struct spdk_nvmf_transport_pg_cache_buf *buffer_ptr;
	int rc, i;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	STAILQ_INIT(&group.retired_bufs);
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

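	/*
	 * In the checks below, spdk_mempool_get is mocked so that every buffer
	 * taken from the transport pool appears at address 0x2000, and the
	 * mocked memory map translates everything to g_rdma_mr (lkey 0xABCD).
	 * Each io_unit_size worth of payload is expected to consume one buffer
	 * and one SGE.
	 */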
	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;

	device.map = (void *)0x0;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	/* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 3: simple I/O, one SGL larger than the transport max io size */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 4: Pretend there are no buffer pools */
	MOCK_SET(spdk_mempool_get, NULL);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == false);
	CU_ASSERT(rdma_req.req.data == NULL);
	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
	CU_ASSERT(rdma_req.req.buffers[0] == NULL);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);

	rdma_req.recv->buf = (void *)0xDDDD;
	/* Test 2: sgl type: keyed data block subtype: offset (in capsule data) */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;

	/* Part 1: Normal I/O smaller than in capsule data size no offset */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
	CU_ASSERT(rdma_req.req.data_from_pool == false);

	/* Part 2: I/O offset + length too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = rtransport.transport.opts.in_capsule_data_size;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 3: I/O too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Test 3: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, &data);

	/* part 1: 2 segments each with 1 wr. */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
		sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 2: 2 segments, each with 1 wr containing 8 sge_elements */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
		sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 16);
	CU_ASSERT(rdma_req.data.wr.num_sge == 8);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
	CU_ASSERT(data.wr.num_sge == 8);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

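	/*
	 * In part 3 below, the first descriptor covers 15.5 io units and the
	 * second covers the remaining 0.5, 16 io units in total. The first
	 * segment needs 16 SGEs (15 full buffers plus a half-used one) in the
	 * request's own WR, and the second segment lands in the chained WR
	 * with a single half-unit SGE, for 17 iovecs overall.
	 */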
	/* part 3: 2 segments, one very large, one very small */
	reset_nvmf_rdma_request(&rdma_req);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.key = 0x44;
	}

	sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
				   rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[0].address = 0x4000;
	sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
			      rtransport.transport.opts.io_unit_size / 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 17);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	for (i = 0; i < 15; i++) {
		CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
	}
	CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
		  rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* Test 4: use PG buffer cache */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;

	for (i = 0; i < 4; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}

	/* part 1: use the four buffers from the pg cache */
	group.group.buf_cache_size = 4;
	group.group.buf_cache_count = 4;
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * 4;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	/* part 2: now that we have used the buffers from the cache, try again. We should get mempool buffers. */
	reset_nvmf_rdma_request(&rdma_req);
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(group.group.buf_cache_count == 0);
	}

	/* part 3: half and half */
	group.group.buf_cache_count = 2;

	for (i = 0; i < 2; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}
	reset_nvmf_rdma_request(&rdma_req);
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	for (i = 0; i < 2; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}
	for (i = 2; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

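	/*
	 * Test 5 below uses the spdk_mem_map_translate mock: g_mr_size makes
	 * the first lookup report a region smaller than the SGE being built,
	 * which the code treats as a buffer split across memory regions. The
	 * buffer is expected to end up on the group's retired_bufs list and be
	 * replaced from the pool (the mock returns the same address, so
	 * req.buffers[0] still compares equal to &buffer).
	 */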
	reset_nvmf_rdma_request(&rdma_req);
	/* Test 5 dealing with a buffer split over two Memory Regions */
	MOCK_SET(spdk_mempool_get, (void *)&buffer);
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;
	g_mr_size = rtransport.transport.opts.io_unit_size / 4;
	g_mr_next_size = rtransport.transport.opts.io_unit_size / 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
	buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
	CU_ASSERT(buffer_ptr == &buffer);
	STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
	CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
	g_mr_size = 0;
	g_mr_next_size = 0;

	reset_nvmf_rdma_request(&rdma_req);
}

static struct spdk_nvmf_rdma_recv *
create_recv(struct spdk_nvmf_rdma_qpair *rqpair, enum spdk_nvme_nvm_opcode opc)
{
	struct spdk_nvmf_rdma_recv *rdma_recv;
	union nvmf_h2c_msg *cmd;
	struct spdk_nvme_sgl_descriptor *sgl;

	rdma_recv = calloc(1, sizeof(*rdma_recv));
	rdma_recv->qpair = rqpair;
	cmd = calloc(1, sizeof(*cmd));
	rdma_recv->sgl[0].addr = (uintptr_t)cmd;
	cmd->nvme_cmd.opc = opc;
	sgl = &cmd->nvme_cmd.dptr.sgl1;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->keyed.length = 1;

	return rdma_recv;
}

static void
free_recv(struct spdk_nvmf_rdma_recv *rdma_recv)
{
	free((void *)rdma_recv->sgl[0].addr);
	free(rdma_recv);
}

static struct spdk_nvmf_rdma_request *
create_req(struct spdk_nvmf_rdma_qpair *rqpair,
	   struct spdk_nvmf_rdma_recv *rdma_recv)
{
	struct spdk_nvmf_rdma_request *rdma_req;
	union nvmf_c2h_msg *cpl;

	rdma_req = calloc(1, sizeof(*rdma_req));
	rdma_req->recv = rdma_recv;
	rdma_req->req.qpair = &rqpair->qpair;
	rdma_req->state = RDMA_REQUEST_STATE_NEW;
	rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
	rdma_req->data.wr.sg_list = rdma_req->data.sgl;
	cpl = calloc(1, sizeof(*cpl));
	rdma_req->rsp.sgl[0].addr = (uintptr_t)cpl;
	rdma_req->req.rsp = cpl;

	return rdma_req;
}

static void
free_req(struct spdk_nvmf_rdma_request *rdma_req)
{
	free((void *)rdma_req->rsp.sgl[0].addr);
	free(rdma_req);
}

static void
qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
	    struct spdk_nvmf_rdma_poller *poller,
	    struct spdk_nvmf_rdma_device *device,
	    struct spdk_nvmf_rdma_resources *resources,
	    struct spdk_nvmf_transport *transport)
{
	memset(rqpair, 0, sizeof(*rqpair));
	STAILQ_INIT(&rqpair->pending_rdma_write_queue);
	STAILQ_INIT(&rqpair->pending_rdma_read_queue);
	rqpair->poller = poller;
	rqpair->device = device;
	rqpair->resources = resources;
	rqpair->qpair.qid = 1;
	rqpair->ibv_state = IBV_QPS_RTS;
	rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	rqpair->max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
	rqpair->max_send_depth = 16;
	rqpair->max_read_depth = 16;
	rqpair->qpair.transport = transport;
	resources->recvs_to_post.first = resources->recvs_to_post.last = NULL;
}

static void
poller_reset(struct spdk_nvmf_rdma_poller *poller,
	     struct spdk_nvmf_rdma_poll_group *group)
{
	memset(poller, 0, sizeof(*poller));
	STAILQ_INIT(&poller->qpairs_pending_recv);
	STAILQ_INIT(&poller->qpairs_pending_send);
	poller->group = group;
}

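/*
 * test_spdk_nvmf_rdma_request_process drives nvmf_rdma_request_process() by
 * hand: it fabricates a recv/request pair with create_recv()/create_req(),
 * sets rdma_req->state to the state under test, and checks the state the
 * request moves to, plus side effects such as the recv being re-posted via
 * resources.recvs_to_post.
 */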
static void
test_spdk_nvmf_rdma_request_process(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_rdma_poll_group group = {};
	struct spdk_nvmf_rdma_poller poller = {};
	struct spdk_nvmf_rdma_device device = {};
	struct spdk_nvmf_rdma_resources resources = {};
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_rdma_recv *rdma_recv;
	struct spdk_nvmf_rdma_request *rdma_req;
	bool progress;

	STAILQ_INIT(&group.group.buf_cache);
	STAILQ_INIT(&group.group.pending_buf_queue);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.transport.data_buf_pool = spdk_mempool_create("test_data_pool", 16, 128, 0, 0);
	rtransport.data_wr_pool = spdk_mempool_create("test_wr_pool", 128,
				  sizeof(struct spdk_nvmf_rdma_request_data),
				  0, 0);
	MOCK_CLEAR(spdk_mempool_get);

	device.attr.device_cap_flags = 0;
	device.map = (void *)0x0;
	g_rdma_mr.lkey = 0xABCD;

	/* Test 1: single SGL READ request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_READ);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> EXECUTING */
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
	/* EXECUTED -> TRANSFERRING_C2H */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(rdma_req->recv == NULL);
	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

	/* Test 2: single SGL WRITE request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> TRANSFERRING_H2C */
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
	STAILQ_INIT(&poller.qpairs_pending_send);
	/* READY_TO_EXECUTE -> EXECUTING */
	rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	/* EXECUTED -> COMPLETING */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
	CU_ASSERT(rdma_req->recv == NULL);
	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

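	/*
	 * Test 3 overlaps two WRITE requests on the same qpair to exercise
	 * ibv send batching: WRITE 1 runs all the way to FREE while WRITE 2 is
	 * still in TRANSFERRING_HOST_TO_CONTROLLER, and each request is then
	 * completed independently.
	 */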
	/* Test 3: WRITE+WRITE ibv_send batching */
	{
		struct spdk_nvmf_rdma_recv *recv1, *recv2;
		struct spdk_nvmf_rdma_request *req1, *req2;
		recv1 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req1 = create_req(&rqpair, recv1);
		recv2 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req2 = create_req(&rqpair, recv2);

		/* WRITE 1: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 1;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);

		/* WRITE 2: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 2;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);

		STAILQ_INIT(&poller.qpairs_pending_send);

		/* WRITE 1 completes before WRITE 2 has finished RDMA reading */
		/* WRITE 1: READY_TO_EXECUTE -> EXECUTING */
		req1->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 1: EXECUTED -> COMPLETING */
		req1->state = RDMA_REQUEST_STATE_EXECUTED;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 1: COMPLETED -> FREE */
		req1->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_FREE);

		/* Now WRITE 2 has finished reading and completes */
		/* WRITE 2: READY_TO_EXECUTE -> EXECUTING */
		req2->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 2: EXECUTED -> COMPLETING */
		req2->state = RDMA_REQUEST_STATE_EXECUTED;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 2: COMPLETED -> FREE */
		req2->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_FREE);

		free_recv(recv1);
		free_req(req1);
		free_recv(recv2);
		free_req(req2);
		poller_reset(&poller, &group);
		qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
	}

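	/*
	 * Test 4 builds a command whose opcode encodes
	 * SPDK_NVME_DATA_BIDIRECTIONAL in its transfer-direction bits. The
	 * RDMA transport cannot serve a bidirectional transfer, so the
	 * request is expected to fail immediately with INVALID_OPCODE and to
	 * move straight to COMPLETING without any data transfer.
	 */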
	/* Test 4: invalid command, check xfer type */
	{
		struct spdk_nvmf_rdma_recv *rdma_recv_inv;
		struct spdk_nvmf_rdma_request *rdma_req_inv;
		/* construct an opcode that specifies BIDIRECTIONAL transfer */
		uint8_t opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;

		rdma_recv_inv = create_recv(&rqpair, opc);
		rdma_req_inv = create_req(&rqpair, rdma_recv_inv);

		/* NEW -> RDMA_REQUEST_STATE_COMPLETING */
		rqpair.current_recv_depth = 1;
		progress = nvmf_rdma_request_process(&rtransport, rdma_req_inv);
		CU_ASSERT(progress == true);
		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_COMPLETING);
		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);

		/* RDMA_REQUEST_STATE_COMPLETED -> FREE */
		rdma_req_inv->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, rdma_req_inv);
		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_FREE);

		free_recv(rdma_recv_inv);
		free_req(rdma_req_inv);
		poller_reset(&poller, &group);
		qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
	}

	spdk_mempool_free(rtransport.transport.data_buf_pool);
	spdk_mempool_free(rtransport.data_wr_pool);
}

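/*
 * test_nvmf_rdma_get_optimal_poll_group checks the round-robin connection
 * scheduler: admin qpairs (qid 0) advance conn_sched.next_admin_pg while I/O
 * qpairs advance conn_sched.next_io_pg, so the two cursors move independently
 * across the poll groups, and destroying a group advances the cursors past it.
 */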
#define TEST_GROUPS_COUNT 5
static void
test_nvmf_rdma_get_optimal_poll_group(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_transport *transport = &rtransport.transport;
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_transport_poll_group *result;
	uint32_t i;

	rqpair.qpair.transport = transport;
	pthread_mutex_init(&rtransport.lock, NULL);
	TAILQ_INIT(&rtransport.poll_groups);

	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		groups[i] = nvmf_rdma_poll_group_create(transport);
		CU_ASSERT(groups[i] != NULL);
		rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
		groups[i]->transport = transport;
	}
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[0]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[0]);

	/* Emulate connection of %TEST_GROUPS_COUNT% initiators - each creates 1 admin and 1 io qp */
	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		rqpair.qpair.qid = 0;
		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i]);

		rqpair.qpair.qid = 1;
		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
	}
	/* Wrapped around: both admin and io pg point to the first pg again.
	   Destroy all poll groups except the last one */
	for (i = 0; i < TEST_GROUPS_COUNT - 1; i++) {
		nvmf_rdma_poll_group_destroy(groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[i + 1]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i + 1]);
	}

	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Check that pointers to the next admin/io poll groups are not changed */
	rqpair.qpair.qid = 0;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	rqpair.qpair.qid = 1;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Remove the last poll group, check that pointers are NULL */
	nvmf_rdma_poll_group_destroy(groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == NULL);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == NULL);

	/* Request optimal poll group, result must be NULL */
	rqpair.qpair.qid = 0;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	rqpair.qpair.qid = 1;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	pthread_mutex_destroy(&rtransport.lock);
}
#undef TEST_GROUPS_COUNT

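/*
 * test_spdk_nvmf_rdma_request_parse_sgl_with_md repeats the SGL parsing checks
 * with DIF insert/strip enabled. Locally, every 512-byte data block is
 * followed by 8 bytes of metadata (elba_length accounts for both), while the
 * wire-side SGL only describes the data, so the parser is expected to emit one
 * data_bs-sized SGE per block with local addresses advancing in
 * (data_bs + md_size) strides.
 */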
static void
test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	char data2_buffer[8192];
	struct spdk_nvmf_rdma_request_data *data2 = (struct spdk_nvmf_rdma_request_data *)data2_buffer;
	struct spdk_nvmf_transport_pg_cache_buf buffer;
	struct spdk_nvmf_transport_pg_cache_buf *buffer_ptr;
	const uint32_t data_bs = 512;
	const uint32_t md_size = 8;
	int rc, i;
	void *aligned_buffer;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	STAILQ_INIT(&group.retired_bufs);
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	device.map = NULL;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size, block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 8;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 2: simple I/O, one SGL equal to io unit size, io_unit_size is not aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 5);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == g_rdma_mr.lkey);

	/* 2nd buffer consumed */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == g_rdma_mr.lkey);

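	/*
	 * The 488/24 split above follows from the unaligned io_unit_size: the
	 * 2048-byte buffer holds three full 520-byte blocks (1560 bytes) plus
	 * 488 bytes of the fourth data block, so its remaining 24 data bytes
	 * spill into a second buffer, giving 5 SGEs for 4 blocks.
	 */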
	/* Part 3: simple I/O, one SGL equal to io unit size, io_unit_size is equal to block size 512 bytes */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs;
	sgl->keyed.length = data_bs;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == data_bs + md_size);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[0].iov_len == data_bs);
	/* 2nd buffer consumed for metadata */
	CU_ASSERT(rdma_req.req.iov[1].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[1].iov_len == md_size);

	/* Part 4: simple I/O, one SGL equal to io unit size, io_unit_size is aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 5: simple I/O, one SGL equal to 2x io unit size, io_unit_size is aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 2;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}
	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].length == data_bs);
	}

	/* Part 6: simple I/O, one SGL larger than the transport io unit size, io_unit_size is not aligned to md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 6;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 6);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 6);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 7);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == g_rdma_mr.lkey);

	/* 2nd IO buffer consumed */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == g_rdma_mr.lkey);

	CU_ASSERT(rdma_req.data.wr.sg_list[5].addr == 0x2000 + 24 + md_size);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].lkey == g_rdma_mr.lkey);

	CU_ASSERT(rdma_req.data.wr.sg_list[6].addr == 0x2000 + 24 + 512 + md_size * 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].lkey == g_rdma_mr.lkey);

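	/*
	 * Part 7 below forces more SGEs than a single WR can hold
	 * (rqpair.max_send_sge): splitting 16 data blocks around the
	 * interleaved metadata produces 17 SGEs, so the parser is expected to
	 * chain an additional WR obtained through the mocked spdk_mempool_get,
	 * i.e. data2_buffer.
	 */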
	/* Part 7: simple I/O, number of SGL entries exceeds the number of entries
	   one WR can hold. Additional WR is chained */
	MOCK_SET(spdk_mempool_get, data2_buffer);
	aligned_buffer = (void *)((uintptr_t)(data2_buffer + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 16;
	sgl->keyed.length = data_bs * 16;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 16);
	CU_ASSERT(rdma_req.req.data == aligned_buffer);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	/* additional wr from pool */
	CU_ASSERT(rdma_req.data.wr.next == (void *)&data2->wr);
	CU_ASSERT(rdma_req.data.wr.next->num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.next->next == &rdma_req.rsp.wr);

	/* Part 8: simple I/O, data with metadata do not fit to 1 io_buffer */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = 516;
	sgl->keyed.length = data_bs * 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 2);
	CU_ASSERT(rdma_req.req.iovcnt == 3);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 2);
	CU_ASSERT(rdma_req.req.data == (void *)0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);

	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	/* 2nd IO buffer consumed, offset 4 bytes because part of the metadata
	   is located at the beginning of that buffer */
	CU_ASSERT(rdma_req.data.wr.sg_list[1].addr == 0x2000 + 4);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].lkey == g_rdma_mr.lkey);

	/* Test 9 dealing with a buffer split over two Memory Regions */
	MOCK_SET(spdk_mempool_get, (void *)&buffer);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 2;
	g_mr_size = data_bs;
	g_mr_next_size = rtransport.transport.opts.io_unit_size;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uint64_t)rdma_req.req.data + i *
			  (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}
	buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
	CU_ASSERT(buffer_ptr == &buffer);
	STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
	CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
	g_mr_size = 0;
	g_mr_next_size = 0;

	/* Test 2: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, &data);
	aligned_buffer = (void *)((uintptr_t)((char *)&data + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);

	/* part 1: 2 segments each with 1 wr. io_unit_size is aligned with data_bs + md_size */
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1,
			  SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK, 0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);

	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = data_bs * 4;
		sgl_desc[i].address = 0x4000 + i * data_bs * 4;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4 * 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4 * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
			  (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + data_bs * 4);
	CU_ASSERT(data.wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
			  (data_bs + md_size));
		CU_ASSERT(data.wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
}

int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl);
	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_process);
	CU_ADD_TEST(suite, test_nvmf_rdma_get_optimal_poll_group);
	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl_with_md);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}