/*-
 * BSD LICENSE
 *
 * Copyright (c) Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "common/lib/test_rdma.c"
#include "nvmf/rdma.c"
#include "nvmf/transport.c"

uint64_t g_mr_size;
uint64_t g_mr_next_size;
struct ibv_mr g_rdma_mr;

#define RDMA_UT_UNITS_IN_MAX_IO 16

struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
	.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
	.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
	.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
	.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
	.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
	.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
	.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
};

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size, uint64_t translation), 0);
DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size), 0);
DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
		nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type, uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
		uint32_t size, uint64_t object_id, uint64_t arg1));

DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
		struct spdk_nvmf_ctrlr_data *cdata));
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
		struct spdk_dif_ctx *dif_ctx), false);
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Store the official trstring as the uppercase version of the input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

uint64_t
spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
{
	if (g_mr_size != 0) {
		*size = g_mr_size;
		if (g_mr_next_size != 0) {
			g_mr_size = g_mr_next_size;
		}
	}

	return (uint64_t)&g_rdma_mr;
}

static void reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
{
	int i;

	rdma_req->req.length = 0;
	rdma_req->req.data_from_pool = false;
	rdma_req->req.data = NULL;
	rdma_req->data.wr.num_sge = 0;
	rdma_req->data.wr.wr.rdma.remote_addr = 0;
	rdma_req->data.wr.wr.rdma.rkey = 0;
	memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));

	for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
		rdma_req->req.iov[i].iov_base = 0;
		rdma_req->req.iov[i].iov_len = 0;
		rdma_req->req.buffers[i] = 0;
		rdma_req->data.wr.sg_list[i].addr = 0;
		rdma_req->data.wr.sg_list[i].length = 0;
		rdma_req->data.wr.sg_list[i].lkey = 0;
	}
	rdma_req->req.iovcnt = 0;
}

static void
test_spdk_nvmf_rdma_request_parse_sgl(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvmf_transport_pg_cache_buf bufs[4];
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	struct spdk_nvmf_transport_pg_cache_buf buffer;
	struct spdk_nvmf_transport_pg_cache_buf *buffer_ptr;
	int rc, i;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	STAILQ_INIT(&group.retired_bufs);
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;

	device.map = (void *)0x0;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	/* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 3: simple I/O, one SGL larger than the transport max io size */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 4: Pretend there are no buffer pools */
	MOCK_SET(spdk_mempool_get, NULL);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == false);
	CU_ASSERT(rdma_req.req.data == NULL);
	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
	CU_ASSERT(rdma_req.req.buffers[0] == NULL);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);

	rdma_req.recv->buf = (void *)0xDDDD;
	/* Test 2: sgl type: data block subtype: offset (in capsule data) */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;

	/* Part 1: Normal I/O smaller than in capsule data size, no offset */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
	CU_ASSERT(rdma_req.req.data_from_pool == false);

	/* Part 2: I/O offset + length too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = rtransport.transport.opts.in_capsule_data_size;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);
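	/* Parsing fails here because the in-capsule offset (sgl->address) plus the
	 * requested length exceeds in_capsule_data_size. */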

	/* Part 3: I/O too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Test 3: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, &data);

	/* part 1: 2 segments each with 1 wr. */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
		sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 2: 2 segments, each with 1 wr containing 8 sge_elements */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
		sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 16);
	CU_ASSERT(rdma_req.data.wr.num_sge == 8);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
	CU_ASSERT(data.wr.num_sge == 8);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 3: 2 segments, one very large, one very small */
	reset_nvmf_rdma_request(&rdma_req);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.key = 0x44;
	}

	sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
				   rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[0].address = 0x4000;
	sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
			      rtransport.transport.opts.io_unit_size / 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 17);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	for (i = 0; i < 15; i++) {
		CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
	}
	CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
		  rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* Test 4: use PG buffer cache */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;

	for (i = 0; i < 4; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}

	/* part 1: use the four buffers from the pg cache */
	group.group.buf_cache_size = 4;
	group.group.buf_cache_count = 4;
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * 4;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	/* part 2: now that we have used the buffers from the cache, try again. We should get mempool buffers. */
	reset_nvmf_rdma_request(&rdma_req);
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(group.group.buf_cache_count == 0);
	}

	/* part 3: half and half */
	group.group.buf_cache_count = 2;

	for (i = 0; i < 2; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}
	reset_nvmf_rdma_request(&rdma_req);
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	for (i = 0; i < 2; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}
	for (i = 2; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	reset_nvmf_rdma_request(&rdma_req);
	/* Test 5: dealing with a buffer split over two Memory Regions */
	MOCK_SET(spdk_mempool_get, (void *)&buffer);
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;
	g_mr_size = rtransport.transport.opts.io_unit_size / 4;
	g_mr_next_size = rtransport.transport.opts.io_unit_size / 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
	buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
	CU_ASSERT(buffer_ptr == &buffer);
	STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
	CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
	g_mr_size = 0;
	g_mr_next_size = 0;

	reset_nvmf_rdma_request(&rdma_req);
}

static struct spdk_nvmf_rdma_recv *
create_recv(struct spdk_nvmf_rdma_qpair *rqpair, enum spdk_nvme_nvm_opcode opc)
{
	struct spdk_nvmf_rdma_recv *rdma_recv;
	union nvmf_h2c_msg *cmd;
	struct spdk_nvme_sgl_descriptor *sgl;

	rdma_recv = calloc(1, sizeof(*rdma_recv));
	rdma_recv->qpair = rqpair;
	cmd = calloc(1, sizeof(*cmd));
	rdma_recv->sgl[0].addr = (uintptr_t)cmd;
	cmd->nvme_cmd.opc = opc;
	sgl = &cmd->nvme_cmd.dptr.sgl1;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->keyed.length = 1;

	return rdma_recv;
}

static void
free_recv(struct spdk_nvmf_rdma_recv *rdma_recv)
{
	free((void *)rdma_recv->sgl[0].addr);
	free(rdma_recv);
}

static struct spdk_nvmf_rdma_request *
create_req(struct spdk_nvmf_rdma_qpair *rqpair,
	   struct spdk_nvmf_rdma_recv *rdma_recv)
{
	struct spdk_nvmf_rdma_request *rdma_req;
	union nvmf_c2h_msg *cpl;

	rdma_req = calloc(1, sizeof(*rdma_req));
	rdma_req->recv = rdma_recv;
	rdma_req->req.qpair = &rqpair->qpair;
	rdma_req->state = RDMA_REQUEST_STATE_NEW;
	rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
	rdma_req->data.wr.sg_list = rdma_req->data.sgl;
	cpl = calloc(1, sizeof(*cpl));
	rdma_req->rsp.sgl[0].addr = (uintptr_t)cpl;
	rdma_req->req.rsp = cpl;

	return rdma_req;
}

static void
free_req(struct spdk_nvmf_rdma_request *rdma_req)
{
	free((void *)rdma_req->rsp.sgl[0].addr);
	free(rdma_req);
}

static void
qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
	    struct spdk_nvmf_rdma_poller *poller,
	    struct spdk_nvmf_rdma_device *device,
	    struct spdk_nvmf_rdma_resources *resources)
{
	memset(rqpair, 0, sizeof(*rqpair));
	STAILQ_INIT(&rqpair->pending_rdma_write_queue);
	STAILQ_INIT(&rqpair->pending_rdma_read_queue);
	rqpair->poller = poller;
	rqpair->device = device;
	rqpair->resources = resources;
	rqpair->qpair.qid = 1;
	rqpair->ibv_state = IBV_QPS_RTS;
	rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	rqpair->max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
	rqpair->max_send_depth = 16;
	rqpair->max_read_depth = 16;
	resources->recvs_to_post.first = resources->recvs_to_post.last = NULL;
}

static void
poller_reset(struct spdk_nvmf_rdma_poller *poller,
	     struct spdk_nvmf_rdma_poll_group *group)
{
	memset(poller, 0, sizeof(*poller));
	STAILQ_INIT(&poller->qpairs_pending_recv);
	STAILQ_INIT(&poller->qpairs_pending_send);
	poller->group = group;
}

static void
test_spdk_nvmf_rdma_request_process(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_rdma_poll_group group = {};
	struct spdk_nvmf_rdma_poller poller = {};
	struct spdk_nvmf_rdma_device device = {};
	struct spdk_nvmf_rdma_resources resources = {};
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_rdma_recv *rdma_recv;
	struct spdk_nvmf_rdma_request *rdma_req;
	bool progress;

	STAILQ_INIT(&group.group.buf_cache);
	STAILQ_INIT(&group.group.pending_buf_queue);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources);

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.transport.data_buf_pool = spdk_mempool_create("test_data_pool", 16, 128, 0, 0);
	rtransport.data_wr_pool = spdk_mempool_create("test_wr_pool", 128,
				  sizeof(struct spdk_nvmf_rdma_request_data), 0, 0);
	MOCK_CLEAR(spdk_mempool_get);

	device.attr.device_cap_flags = 0;
	device.map = (void *)0x0;
	g_rdma_mr.lkey = 0xABCD;

	/* Test 1: single SGL READ request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_READ);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> EXECUTING */
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
	/* EXECUTED -> TRANSFERRING_C2H */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(rdma_req->recv == NULL);
	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources);

	/* Test 2: single SGL WRITE request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> TRANSFERRING_H2C */
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
	STAILQ_INIT(&poller.qpairs_pending_send);
	/* READY_TO_EXECUTE -> EXECUTING */
	rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	/* EXECUTED -> COMPLETING */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
	CU_ASSERT(rdma_req->recv == NULL);
	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources);

	/* Test 3: WRITE+WRITE ibv_send batching */
	{
		struct spdk_nvmf_rdma_recv *recv1, *recv2;
		struct spdk_nvmf_rdma_request *req1, *req2;
		recv1 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req1 = create_req(&rqpair, recv1);
		recv2 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req2 = create_req(&rqpair, recv2);

		/* WRITE 1: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 1;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);

		/* WRITE 2: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 2;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);

		STAILQ_INIT(&poller.qpairs_pending_send);

		/* WRITE 1 completes before WRITE 2 has finished RDMA reading */
		/* WRITE 1: READY_TO_EXECUTE -> EXECUTING */
		req1->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 1: EXECUTED -> COMPLETING */
		req1->state = RDMA_REQUEST_STATE_EXECUTED;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 1: COMPLETED -> FREE */
		req1->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_FREE);

		/* Now WRITE 2 has finished reading and completes */
		/* WRITE 2: READY_TO_EXECUTE -> EXECUTING */
		req2->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 2: EXECUTED -> COMPLETING */
		req2->state = RDMA_REQUEST_STATE_EXECUTED;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 2: COMPLETED -> FREE */
		req2->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_FREE);

		free_recv(recv1);
		free_req(req1);
		free_recv(recv2);
		free_req(req2);
		poller_reset(&poller, &group);
		qpair_reset(&rqpair, &poller, &device, &resources);
	}

	/* Test 4: invalid command, check xfer type */
	{
		struct spdk_nvmf_rdma_recv *rdma_recv_inv;
		struct spdk_nvmf_rdma_request *rdma_req_inv;
		/* construct an opcode that specifies BIDIRECTIONAL transfer */
		uint8_t opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;

		rdma_recv_inv = create_recv(&rqpair, opc);
		rdma_req_inv = create_req(&rqpair, rdma_recv_inv);

		/* NEW -> RDMA_REQUEST_STATE_COMPLETING */
		rqpair.current_recv_depth = 1;
		progress = nvmf_rdma_request_process(&rtransport, rdma_req_inv);
		CU_ASSERT(progress == true);
		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_COMPLETING);
		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);

		/* RDMA_REQUEST_STATE_COMPLETED -> FREE */
		rdma_req_inv->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, rdma_req_inv);
		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_FREE);

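		/* Tear down the invalid-opcode request and reset the shared poller/qpair
		 * state before leaving the test. */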
		free_recv(rdma_recv_inv);
		free_req(rdma_req_inv);
		poller_reset(&poller, &group);
		qpair_reset(&rqpair, &poller, &device, &resources);
	}

	spdk_mempool_free(rtransport.transport.data_buf_pool);
	spdk_mempool_free(rtransport.data_wr_pool);
}

#define TEST_GROUPS_COUNT 5
static void
test_nvmf_rdma_get_optimal_poll_group(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_transport *transport = &rtransport.transport;
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_transport_poll_group *result;
	uint32_t i;

	rqpair.qpair.transport = transport;
	pthread_mutex_init(&rtransport.lock, NULL);
	TAILQ_INIT(&rtransport.poll_groups);

	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		groups[i] = nvmf_rdma_poll_group_create(transport);
		CU_ASSERT(groups[i] != NULL);
		rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
		groups[i]->transport = transport;
	}
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[0]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[0]);

	/* Emulate connection of %TEST_GROUPS_COUNT% initiators - each creates 1 admin and 1 io qp */
	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		rqpair.qpair.qid = 0;
		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i]);

		rqpair.qpair.qid = 1;
		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
	}
	/* Wrap around: admin/io pg point to the first pg.
	   Destroy all poll groups except the last one. */
	for (i = 0; i < TEST_GROUPS_COUNT - 1; i++) {
		nvmf_rdma_poll_group_destroy(groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[i + 1]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i + 1]);
	}

	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Check that pointers to the next admin/io poll groups are not changed */
	rqpair.qpair.qid = 0;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	rqpair.qpair.qid = 1;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Remove the last poll group, check that pointers are NULL */
	nvmf_rdma_poll_group_destroy(groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == NULL);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == NULL);

	/* Request optimal poll group, result must be NULL */
	rqpair.qpair.qid = 0;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	rqpair.qpair.qid = 1;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	pthread_mutex_destroy(&rtransport.lock);
}
#undef TEST_GROUPS_COUNT

static void
test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	struct spdk_nvmf_transport_pg_cache_buf buffer;
	struct spdk_nvmf_transport_pg_cache_buf *buffer_ptr;
	const uint32_t data_bs = 512;
	const uint32_t md_size = 8;
	int rc, i;
	void *aligned_buffer;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	STAILQ_INIT(&group.retired_bufs);
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	device.map = NULL;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size, block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 8;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 2: simple I/O, one SGL equal to io unit size, io_unit_size is not aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 5);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == g_rdma_mr.lkey);

	/* 2nd buffer consumed */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == g_rdma_mr.lkey);

	/* Part 3: simple I/O, one SGL equal to io unit size, io_unit_size is equal to block size 512 bytes */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs;
	sgl->keyed.length = data_bs;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == data_bs + md_size);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[0].iov_len == data_bs);
	/* 2nd buffer consumed for metadata */
	CU_ASSERT(rdma_req.req.iov[1].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[1].iov_len == md_size);

	/* Part 4: simple I/O, one SGL equal to io unit size, io_unit_size is aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 5: simple I/O, one SGL equal to 2x io unit size, io_unit_size is aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 2;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}
	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].length == data_bs);
	}

	/* Part 6: simple I/O, one SGL larger than the transport io unit size, io_unit_size is not aligned to md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 6;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 6);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 6);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 7);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == g_rdma_mr.lkey);

	/* 2nd IO buffer consumed */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == g_rdma_mr.lkey);

	CU_ASSERT(rdma_req.data.wr.sg_list[5].addr == 0x2000 + 24 + md_size);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].lkey == g_rdma_mr.lkey);

	CU_ASSERT(rdma_req.data.wr.sg_list[6].addr == 0x2000 + 24 + 512 + md_size * 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].lkey == g_rdma_mr.lkey);

	/* Part 7: simple I/O, number of SGL entries exceeds the number of entries
	   one WR can hold. Additional WR is chained */
	MOCK_SET(spdk_mempool_get, &data);
	aligned_buffer = (void *)((uintptr_t)((char *)&data + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 16;
	sgl->keyed.length = data_bs * 16;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 16);
	CU_ASSERT(rdma_req.req.data == aligned_buffer);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	/* additional wr from pool */
	CU_ASSERT(rdma_req.data.wr.next == (void *)&data.wr);
	CU_ASSERT(rdma_req.data.wr.next->num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.next->next == &rdma_req.rsp.wr);

	/* Part 8: simple I/O, data with metadata does not fit into one io_buffer */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = 516;
	sgl->keyed.length = data_bs * 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 2);
	CU_ASSERT(rdma_req.req.iovcnt == 3);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 2);
	CU_ASSERT(rdma_req.req.data == (void *)0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);

	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	/* 2nd IO buffer consumed, offset by 4 bytes because part of the metadata
	   is located at the beginning of that buffer */
	CU_ASSERT(rdma_req.data.wr.sg_list[1].addr == 0x2000 + 4);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].lkey == g_rdma_mr.lkey);

	/* Test 9: dealing with a buffer split over two Memory Regions */
	MOCK_SET(spdk_mempool_get, (void *)&buffer);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 2;
	g_mr_size = data_bs;
	g_mr_next_size = rtransport.transport.opts.io_unit_size;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uint64_t)rdma_req.req.data + i *
			  (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}
	buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
	CU_ASSERT(buffer_ptr == &buffer);
	STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
	CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
	g_mr_size = 0;
	g_mr_next_size = 0;

	/* Test 2: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, &data);
	aligned_buffer = (void *)((uintptr_t)((char *)&data + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);

	/* part 1: 2 segments each with 1 wr. io_unit_size is aligned with data_bs + md_size */
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1,
			  SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK, 0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);

	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = data_bs * 4;
		sgl_desc[i].address = 0x4000 + i * data_bs * 4;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4 * 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4 * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
			  (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + data_bs * 4);
	CU_ASSERT(data.wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
			  (data_bs + md_size));
		CU_ASSERT(data.wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
}

int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl);
	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_process);
	CU_ADD_TEST(suite, test_nvmf_rdma_get_optimal_poll_group);
	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl_with_md);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}