/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "common/lib/test_rdma.c"
#include "nvmf/rdma.c"
#include "nvmf/transport.c"

uint64_t g_mr_size;
uint64_t g_mr_next_size;
struct ibv_mr g_rdma_mr;

#define RDMA_UT_UNITS_IN_MAX_IO 16

struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
	.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
	.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
	.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
	.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
	.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
	.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
	.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
};

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size, uint64_t translation), 0);
DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size), 0);
DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
		nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type, uint8_t
		object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));

DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
		struct spdk_nvmf_ctrlr_data *cdata));
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
		struct spdk_dif_ctx *dif_ctx), false);
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(nvmf_ctrlr_abort_request, int,
	    (struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* cast official trstring to uppercase version of input.
	 */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

uint64_t
spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
{
	if (g_mr_size != 0) {
		*(uint32_t *)size = g_mr_size;
		if (g_mr_next_size != 0) {
			g_mr_size = g_mr_next_size;
		}
	}

	return (uint64_t)&g_rdma_mr;
}

static void reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
{
	int i;

	rdma_req->req.length = 0;
	rdma_req->req.data_from_pool = false;
	rdma_req->req.data = NULL;
	rdma_req->data.wr.num_sge = 0;
	rdma_req->data.wr.wr.rdma.remote_addr = 0;
	rdma_req->data.wr.wr.rdma.rkey = 0;
	memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));

	for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
		rdma_req->req.iov[i].iov_base = 0;
		rdma_req->req.iov[i].iov_len = 0;
		rdma_req->req.buffers[i] = 0;
		rdma_req->data.wr.sg_list[i].addr = 0;
		rdma_req->data.wr.sg_list[i].length = 0;
		rdma_req->data.wr.sg_list[i].lkey = 0;
	}
	rdma_req->req.iovcnt = 0;
}

static void
test_spdk_nvmf_rdma_request_parse_sgl(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvmf_transport_pg_cache_buf bufs[4];
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	struct spdk_nvmf_transport_pg_cache_buf buffer;
	struct spdk_nvmf_transport_pg_cache_buf *buffer_ptr;
	int rc, i;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	STAILQ_INIT(&group.retired_bufs);
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;

	device.map = (void *)0x0;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	/* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 3: simple I/O, one SGL larger than the transport max io size */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 4: Pretend there are no buffer pools */
	MOCK_SET(spdk_mempool_get, NULL);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == false);
	CU_ASSERT(rdma_req.req.data == NULL);
	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
	CU_ASSERT(rdma_req.req.buffers[0] == NULL);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);

	rdma_req.recv->buf = (void *)0xDDDD;
	/* Test 2: sgl type: data block subtype: offset (in capsule data) */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;

	/* Part 1: Normal I/O smaller than in capsule data size, no offset */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
	CU_ASSERT(rdma_req.req.data_from_pool == false);

	/* Part 2: I/O offset + length too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = rtransport.transport.opts.in_capsule_data_size;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);
	/* Part 3: I/O too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Test 3: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, &data);

	/* part 1: 2 segments each with 1 wr. */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
		sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 2: 2 segments, each with 1 wr containing 8 sge_elements */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
		sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 16);
	CU_ASSERT(rdma_req.data.wr.num_sge == 8);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
	CU_ASSERT(data.wr.num_sge == 8);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 3: 2 segments, one very large, one very small */
	reset_nvmf_rdma_request(&rdma_req);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.key = 0x44;
	}

	sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
				   rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[0].address = 0x4000;
	sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[1].address = 0x4000 +
			      rtransport.transport.opts.io_unit_size * 15 +
			      rtransport.transport.opts.io_unit_size / 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 17);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	for (i = 0; i < 15; i++) {
		CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
	}
	CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
		  rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* Test 4: use PG buffer cache */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;

	for (i = 0; i < 4; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}

	/* part 1: use the four buffers from the pg cache */
	group.group.buf_cache_size = 4;
	group.group.buf_cache_count = 4;
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * 4;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	/* part 2: now that we have used the buffers from the cache, try again. We should get mempool buffers.
	 */
	reset_nvmf_rdma_request(&rdma_req);
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(group.group.buf_cache_count == 0);
	}

	/* part 3: half and half */
	group.group.buf_cache_count = 2;

	for (i = 0; i < 2; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}
	reset_nvmf_rdma_request(&rdma_req);
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	for (i = 0; i < 2; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}
	for (i = 2; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	reset_nvmf_rdma_request(&rdma_req);
	/* Test 5 dealing with a buffer split over two Memory Regions */
	MOCK_SET(spdk_mempool_get, (void *)&buffer);
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;
	g_mr_size = rtransport.transport.opts.io_unit_size / 4;
	g_mr_next_size = rtransport.transport.opts.io_unit_size / 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size /
		  2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
	buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
	CU_ASSERT(buffer_ptr == &buffer);
	STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
	CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
	g_mr_size = 0;
	g_mr_next_size = 0;

	reset_nvmf_rdma_request(&rdma_req);
}

static struct spdk_nvmf_rdma_recv *
create_recv(struct spdk_nvmf_rdma_qpair *rqpair, enum spdk_nvme_nvm_opcode opc)
{
	struct spdk_nvmf_rdma_recv *rdma_recv;
	union nvmf_h2c_msg *cmd;
	struct spdk_nvme_sgl_descriptor *sgl;

	rdma_recv = calloc(1, sizeof(*rdma_recv));
	rdma_recv->qpair = rqpair;
	cmd = calloc(1, sizeof(*cmd));
	rdma_recv->sgl[0].addr = (uintptr_t)cmd;
	cmd->nvme_cmd.opc = opc;
	sgl = &cmd->nvme_cmd.dptr.sgl1;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->keyed.length = 1;

	return rdma_recv;
}

static void
free_recv(struct spdk_nvmf_rdma_recv *rdma_recv)
{
	free((void *)rdma_recv->sgl[0].addr);
	free(rdma_recv);
}

static struct spdk_nvmf_rdma_request *
create_req(struct spdk_nvmf_rdma_qpair *rqpair,
	   struct spdk_nvmf_rdma_recv *rdma_recv)
{
	struct spdk_nvmf_rdma_request *rdma_req;
	union nvmf_c2h_msg *cpl;

	rdma_req = calloc(1, sizeof(*rdma_req));
	rdma_req->recv = rdma_recv;
	rdma_req->req.qpair = &rqpair->qpair;
	rdma_req->state = RDMA_REQUEST_STATE_NEW;
	rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
	rdma_req->data.wr.sg_list = rdma_req->data.sgl;
	cpl = calloc(1, sizeof(*cpl));
	rdma_req->rsp.sgl[0].addr = (uintptr_t)cpl;
	rdma_req->req.rsp = cpl;

	return rdma_req;
}

static void
free_req(struct spdk_nvmf_rdma_request *rdma_req)
{
	free((void *)rdma_req->rsp.sgl[0].addr);
	free(rdma_req);
}

static void
qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
	    struct spdk_nvmf_rdma_poller *poller,
	    struct spdk_nvmf_rdma_device *device,
	    struct spdk_nvmf_rdma_resources *resources)
{
	memset(rqpair, 0, sizeof(*rqpair));
	STAILQ_INIT(&rqpair->pending_rdma_write_queue);
	STAILQ_INIT(&rqpair->pending_rdma_read_queue);
	rqpair->poller = poller;
	rqpair->device = device;
	rqpair->resources = resources;
	rqpair->qpair.qid = 1;
	rqpair->ibv_state = IBV_QPS_RTS;
	rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	rqpair->max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
	rqpair->max_send_depth = 16;
	rqpair->max_read_depth = 16;
	resources->recvs_to_post.first = resources->recvs_to_post.last = NULL;
}

static void
poller_reset(struct spdk_nvmf_rdma_poller *poller,
	     struct spdk_nvmf_rdma_poll_group *group)
{
	memset(poller, 0, sizeof(*poller));
	STAILQ_INIT(&poller->qpairs_pending_recv);
	STAILQ_INIT(&poller->qpairs_pending_send);
	poller->group = group;
}

static void
test_spdk_nvmf_rdma_request_process(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_rdma_poll_group group = {};
	struct spdk_nvmf_rdma_poller poller = {};
	struct spdk_nvmf_rdma_device device = {};
	struct spdk_nvmf_rdma_resources resources = {};
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_rdma_recv *rdma_recv;
	struct spdk_nvmf_rdma_request
	*rdma_req;
	bool progress;

	STAILQ_INIT(&group.group.buf_cache);
	STAILQ_INIT(&group.group.pending_buf_queue);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources);

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.transport.data_buf_pool = spdk_mempool_create("test_data_pool", 16, 128, 0, 0);
	rtransport.data_wr_pool = spdk_mempool_create("test_wr_pool", 128,
				  sizeof(struct spdk_nvmf_rdma_request_data),
				  0, 0);
	MOCK_CLEAR(spdk_mempool_get);

	device.attr.device_cap_flags = 0;
	device.map = (void *)0x0;
	g_rdma_mr.lkey = 0xABCD;

	/* Test 1: single SGL READ request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_READ);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> EXECUTING */
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
	/* EXECUTED -> TRANSFERRING_C2H */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(rdma_req->recv == NULL);
	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources);

	/* Test 2: single SGL WRITE request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> TRANSFERRING_H2C */
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
	STAILQ_INIT(&poller.qpairs_pending_send);
	/* READY_TO_EXECUTE -> EXECUTING */
	rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	/* EXECUTED -> COMPLETING */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
	CU_ASSERT(rdma_req->recv == NULL);
	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device,
		    &resources);

	/* Test 3: WRITE+WRITE ibv_send batching */
	{
		struct spdk_nvmf_rdma_recv *recv1, *recv2;
		struct spdk_nvmf_rdma_request *req1, *req2;
		recv1 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req1 = create_req(&rqpair, recv1);
		recv2 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req2 = create_req(&rqpair, recv2);

		/* WRITE 1: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 1;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);

		/* WRITE 2: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 2;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);

		STAILQ_INIT(&poller.qpairs_pending_send);

		/* WRITE 1 completes before WRITE 2 has finished RDMA reading */
		/* WRITE 1: READY_TO_EXECUTE -> EXECUTING */
		req1->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 1: EXECUTED -> COMPLETING */
		req1->state = RDMA_REQUEST_STATE_EXECUTED;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 1: COMPLETED -> FREE */
		req1->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_FREE);

		/* Now WRITE 2 has finished reading and completes */
		/* WRITE 2: READY_TO_EXECUTE -> EXECUTING */
		req2->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 2: EXECUTED -> COMPLETING */
		req2->state = RDMA_REQUEST_STATE_EXECUTED;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 2: COMPLETED -> FREE */
		req2->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_FREE);

		free_recv(recv1);
		free_req(req1);
		free_recv(recv2);
		free_req(req2);
		poller_reset(&poller, &group);
		qpair_reset(&rqpair, &poller, &device, &resources);
	}

	spdk_mempool_free(rtransport.transport.data_buf_pool);
	spdk_mempool_free(rtransport.data_wr_pool);
}

#define TEST_GROUPS_COUNT 5
static void
test_nvmf_rdma_get_optimal_poll_group(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_transport *transport = &rtransport.transport;
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_transport_poll_group *result;
	uint32_t i;

	rqpair.qpair.transport = transport;
	pthread_mutex_init(&rtransport.lock, NULL);
	TAILQ_INIT(&rtransport.poll_groups);

	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		groups[i] = nvmf_rdma_poll_group_create(transport);
		CU_ASSERT(groups[i] != NULL);
		rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
		groups[i]->transport = transport;
	}
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[0]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[0]);

	/* Emulate connection of %TEST_GROUPS_COUNT% initiators - each creates 1 admin and 1 io qp */
	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		rqpair.qpair.qid = 0;
		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i]);

		rqpair.qpair.qid = 1;
		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
	}
	/* wrap around, admin/io pg point to the first pg.
	   Destroy all poll groups except the last one */
	for (i = 0; i < TEST_GROUPS_COUNT - 1; i++) {
		nvmf_rdma_poll_group_destroy(groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[i + 1]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i + 1]);
	}

	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Check that pointers to the next admin/io poll groups are not changed */
	rqpair.qpair.qid = 0;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	rqpair.qpair.qid = 1;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Remove the last poll group, check that pointers are NULL */
	nvmf_rdma_poll_group_destroy(groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == NULL);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == NULL);

	/* Request optimal poll group, result must be NULL */
	rqpair.qpair.qid = 0;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	rqpair.qpair.qid = 1;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	pthread_mutex_destroy(&rtransport.lock);
}
#undef TEST_GROUPS_COUNT

static void
test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	struct spdk_nvmf_transport_pg_cache_buf buffer;
	struct spdk_nvmf_transport_pg_cache_buf *buffer_ptr;
	const uint32_t data_bs = 512;
	const uint32_t md_size = 8;
	int rc, i;
	void *aligned_buffer;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	STAILQ_INIT(&group.retired_bufs);
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	device.map = NULL;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size, block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 8;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 2: simple I/O, one SGL equal to io unit size, io_unit_size is not aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 5);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == g_rdma_mr.lkey);

	/* 2nd buffer consumed */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == g_rdma_mr.lkey);

	/* Part 3: simple I/O, one SGL equal io unit size, io_unit_size is equal to block size 512 bytes */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs;
	sgl->keyed.length = data_bs;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == data_bs + md_size);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[0].iov_len == data_bs);
	/* 2nd buffer consumed for metadata */
	CU_ASSERT(rdma_req.req.iov[1].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[1].iov_len == md_size);

	/* Part 4: simple I/O, one SGL equal io unit size, io_unit_size is aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey ==
		  0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 5: simple I/O, one SGL equal to 2x io unit size, io_unit_size is aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 2;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}
	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].length == data_bs);
	}

	/* Part 6: simple I/O, one SGL larger than the transport io unit size, io_unit_size is not aligned to md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 6;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 6);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 6);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 7);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == g_rdma_mr.lkey);

	/* 2nd IO buffer consumed */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == g_rdma_mr.lkey);

	CU_ASSERT(rdma_req.data.wr.sg_list[5].addr == 0x2000 + 24 + md_size);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].lkey == g_rdma_mr.lkey);

	CU_ASSERT(rdma_req.data.wr.sg_list[6].addr == 0x2000 + 24 + 512 + md_size * 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].lkey == g_rdma_mr.lkey);

	/* Part 7: simple I/O, number of SGL entries exceeds the number of entries
	   one WR can hold. Additional WR is chained */
	MOCK_SET(spdk_mempool_get, &data);
	aligned_buffer = (void *)((uintptr_t)((char *)&data + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 16;
	sgl->keyed.length = data_bs * 16;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 16);
	CU_ASSERT(rdma_req.req.data == aligned_buffer);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	/* additional wr from pool */
	CU_ASSERT(rdma_req.data.wr.next == (void *)&data.wr);
	CU_ASSERT(rdma_req.data.wr.next->num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.next->next == &rdma_req.rsp.wr);

	/* Part 8: simple I/O, data with metadata does not fit into one io_buffer */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = 516;
	sgl->keyed.length = data_bs * 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 2);
	CU_ASSERT(rdma_req.req.iovcnt == 3);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 2);
	CU_ASSERT(rdma_req.req.data == (void *)0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);

	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	/* 2nd IO buffer consumed, offset 4 bytes because part of the metadata
	   is located at the beginning of that buffer */
	CU_ASSERT(rdma_req.data.wr.sg_list[1].addr == 0x2000 + 4);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].lkey == g_rdma_mr.lkey);

	/* Test 9 dealing with a buffer split over two Memory Regions */
	MOCK_SET(spdk_mempool_get, (void *)&buffer);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 2;
	g_mr_size = data_bs;
	g_mr_next_size = rtransport.transport.opts.io_unit_size;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uint64_t)rdma_req.req.data + i *
			  (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}
	buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
	CU_ASSERT(buffer_ptr == &buffer);
	STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
	CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
	g_mr_size = 0;
	g_mr_next_size = 0;

	/* Test 2: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, &data);
	aligned_buffer = (void *)((uintptr_t)((char *)&data + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);

	/* part 1: 2 segments each with 1 wr.
	   io_unit_size is aligned with data_bs + md_size */
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1,
			  SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK, 0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);

	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = data_bs * 4;
		sgl_desc[i].address = 0x4000 + i * data_bs * 4;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4 * 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4 * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
			  (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + data_bs * 4);
	CU_ASSERT(data.wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
			  (data_bs + md_size));
		CU_ASSERT(data.wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
}

int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl);
	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_process);
	CU_ADD_TEST(suite, test_nvmf_rdma_get_optimal_poll_group);
	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl_with_md);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}