/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "nvmf/rdma.c"
#include "nvmf/transport.c"

uint64_t g_mr_size;
uint64_t g_mr_next_size;
struct ibv_mr g_rdma_mr;

#define RDMA_UT_UNITS_IN_MAX_IO 16

struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
	.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
	.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
	.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
	.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
	.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
	.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
	.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
};

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size, uint64_t translation), 0);
DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size), 0);
DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
		nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type, uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));

DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
		struct spdk_nvmf_ctrlr_data *cdata));
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(spdk_nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
		struct spdk_dif_ctx *dif_ctx), false);
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

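/*
 * The transport-id helpers below are given working implementations instead of
 * DEFINE_STUB no-ops so that the code under test sees real return values
 * rather than stubbed defaults.
 */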
const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Use the uppercase version of the input as the official trstring. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

uint64_t
spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
{
	if (g_mr_size != 0) {
		*size = g_mr_size;
		if (g_mr_next_size != 0) {
			g_mr_size = g_mr_next_size;
		}
	}

	return (uint64_t)&g_rdma_mr;
}

/* Reset the request/WR fields that the sub-tests below populate. */
static void reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
{
	int i;

	rdma_req->req.length = 0;
	rdma_req->req.data_from_pool = false;
	rdma_req->req.data = NULL;
	rdma_req->data.wr.num_sge = 0;
	rdma_req->data.wr.wr.rdma.remote_addr = 0;
	rdma_req->data.wr.wr.rdma.rkey = 0;
	memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));

	for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
		rdma_req->req.iov[i].iov_base = 0;
		rdma_req->req.iov[i].iov_len = 0;
		rdma_req->req.buffers[i] = 0;
		rdma_req->data.wr.sg_list[i].addr = 0;
		rdma_req->data.wr.sg_list[i].length = 0;
		rdma_req->data.wr.sg_list[i].lkey = 0;
	}
	rdma_req->req.iovcnt = 0;
}

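/*
 * Exercise spdk_nvmf_rdma_request_parse_sgl() for keyed SGLs backed by the
 * transport buffer pool, in-capsule data, multi-descriptor SGLs, the poll
 * group buffer cache, and a buffer that spans two memory regions.
 */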
static void
test_spdk_nvmf_rdma_request_parse_sgl(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvmf_transport_pg_cache_buf bufs[4];
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	struct spdk_nvmf_transport_pg_cache_buf buffer;
	struct spdk_nvmf_transport_pg_cache_buf *buffer_ptr;
	int rc, i;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	STAILQ_INIT(&group.retired_bufs);
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;

	device.map = (void *)0x0;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	/* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 3: simple I/O, one SGL larger than the transport max io size */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 4: Pretend there are no buffer pools */
	MOCK_SET(spdk_mempool_get, NULL);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == false);
	CU_ASSERT(rdma_req.req.data == NULL);
	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
	CU_ASSERT(rdma_req.req.buffers[0] == NULL);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);

	rdma_req.recv->buf = (void *)0xDDDD;
	/* Test 2: sgl type: keyed data block subtype: offset (in capsule data) */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;

	/* Part 1: Normal I/O smaller than in-capsule data size, no offset */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
	CU_ASSERT(rdma_req.req.data_from_pool == false);

	/* Part 2: I/O offset + length too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = rtransport.transport.opts.in_capsule_data_size;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 3: I/O too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Test 3: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, &data);

	/* part 1: 2 segments each with 1 wr. */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
		sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 2: 2 segments, each with 1 wr containing 8 sge_elements */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
		sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 16);
	CU_ASSERT(rdma_req.data.wr.num_sge == 8);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
	CU_ASSERT(data.wr.num_sge == 8);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 3: 2 segments, one very large, one very small */
	reset_nvmf_rdma_request(&rdma_req);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.key = 0x44;
	}

	sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
				   rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[0].address = 0x4000;
	sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
			      rtransport.transport.opts.io_unit_size / 2;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 17);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	for (i = 0; i < 15; i++) {
		CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
	}
	CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
		  rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* Test 4: use PG buffer cache */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;

	for (i = 0; i < 4; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}

	/* part 1: use the four buffers from the pg cache */
	group.group.buf_cache_size = 4;
	group.group.buf_cache_count = 4;
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * 4;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	/* part 2: now that we have used the buffers from the cache, try again. We should get mempool buffers. */
	reset_nvmf_rdma_request(&rdma_req);
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(group.group.buf_cache_count == 0);
	}

	/* part 3: half and half */
	group.group.buf_cache_count = 2;

	for (i = 0; i < 2; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}
	reset_nvmf_rdma_request(&rdma_req);
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	for (i = 0; i < 2; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}
	for (i = 2; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	reset_nvmf_rdma_request(&rdma_req);
	/* Test 5 dealing with a buffer split over two Memory Regions */
	MOCK_SET(spdk_mempool_get, (void *)&buffer);
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;
	g_mr_size = rtransport.transport.opts.io_unit_size / 4;
	g_mr_next_size = rtransport.transport.opts.io_unit_size / 2;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
	buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
	CU_ASSERT(buffer_ptr == &buffer);
	STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
	CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
	g_mr_size = 0;
	g_mr_next_size = 0;

	reset_nvmf_rdma_request(&rdma_req);
}

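/*
 * Helpers for test_spdk_nvmf_rdma_request_process(): allocate a minimal
 * recv/request pair for the given opcode and reset qpair/poller state
 * between sub-tests.
 */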
static struct spdk_nvmf_rdma_recv *
create_recv(struct spdk_nvmf_rdma_qpair *rqpair, enum spdk_nvme_nvm_opcode opc)
{
	struct spdk_nvmf_rdma_recv *rdma_recv;
	union nvmf_h2c_msg *cmd;
	struct spdk_nvme_sgl_descriptor *sgl;

	rdma_recv = calloc(1, sizeof(*rdma_recv));
	rdma_recv->qpair = rqpair;
	cmd = calloc(1, sizeof(*cmd));
	rdma_recv->sgl[0].addr = (uintptr_t)cmd;
	cmd->nvme_cmd.opc = opc;
	sgl = &cmd->nvme_cmd.dptr.sgl1;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->keyed.length = 1;

	return rdma_recv;
}

static void
free_recv(struct spdk_nvmf_rdma_recv *rdma_recv)
{
	free((void *)rdma_recv->sgl[0].addr);
	free(rdma_recv);
}

static struct spdk_nvmf_rdma_request *
create_req(struct spdk_nvmf_rdma_qpair *rqpair,
	   struct spdk_nvmf_rdma_recv *rdma_recv)
{
	struct spdk_nvmf_rdma_request *rdma_req;
	union nvmf_c2h_msg *cpl;

	rdma_req = calloc(1, sizeof(*rdma_req));
	rdma_req->recv = rdma_recv;
	rdma_req->req.qpair = &rqpair->qpair;
	rdma_req->state = RDMA_REQUEST_STATE_NEW;
	rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
	rdma_req->data.wr.sg_list = rdma_req->data.sgl;
	cpl = calloc(1, sizeof(*cpl));
	rdma_req->rsp.sgl[0].addr = (uintptr_t)cpl;
	rdma_req->req.rsp = cpl;

	return rdma_req;
}

static void
free_req(struct spdk_nvmf_rdma_request *rdma_req)
{
	free((void *)rdma_req->rsp.sgl[0].addr);
	free(rdma_req);
}

static void
qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
	    struct spdk_nvmf_rdma_poller *poller,
	    struct spdk_nvmf_rdma_device *device,
	    struct spdk_nvmf_rdma_resources *resources)
{
	memset(rqpair, 0, sizeof(*rqpair));
	STAILQ_INIT(&rqpair->pending_rdma_write_queue);
	STAILQ_INIT(&rqpair->pending_rdma_read_queue);
	rqpair->poller = poller;
	rqpair->device = device;
	rqpair->resources = resources;
	rqpair->qpair.qid = 1;
	rqpair->ibv_state = IBV_QPS_RTS;
	rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	rqpair->max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
	rqpair->max_send_depth = 16;
	rqpair->max_read_depth = 16;
	resources->recvs_to_post.first = resources->recvs_to_post.last = NULL;
}

static void
poller_reset(struct spdk_nvmf_rdma_poller *poller,
	     struct spdk_nvmf_rdma_poll_group *group)
{
	memset(poller, 0, sizeof(*poller));
	STAILQ_INIT(&poller->qpairs_pending_recv);
	STAILQ_INIT(&poller->qpairs_pending_send);
	poller->group = group;
}

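/*
 * Drive spdk_nvmf_rdma_request_process() through the request state machine
 * (NEW -> ... -> COMPLETED -> FREE) for a single READ, a single WRITE, and
 * two batched WRITEs, checking the send/recv WRs queued at each transition.
 */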
static void
test_spdk_nvmf_rdma_request_process(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_rdma_poll_group group = {};
	struct spdk_nvmf_rdma_poller poller = {};
	struct spdk_nvmf_rdma_device device = {};
	struct spdk_nvmf_rdma_resources resources = {};
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_rdma_recv *rdma_recv;
	struct spdk_nvmf_rdma_request *rdma_req;
	bool progress;

	STAILQ_INIT(&group.group.buf_cache);
	STAILQ_INIT(&group.group.pending_buf_queue);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources);

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.transport.data_buf_pool = spdk_mempool_create("test_data_pool", 16, 128, 0, 0);
	rtransport.data_wr_pool = spdk_mempool_create("test_wr_pool", 128,
				  sizeof(struct spdk_nvmf_rdma_request_data),
				  0, 0);
	MOCK_CLEAR(spdk_mempool_get);

	device.attr.device_cap_flags = 0;
	device.map = (void *)0x0;
	g_rdma_mr.lkey = 0xABCD;

	/* Test 1: single SGL READ request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_READ);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> EXECUTING */
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
	/* EXECUTED -> TRANSFERRING_C2H */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(rdma_req->recv == NULL);
	CU_ASSERT(rqpair.sends_to_post.first == &rdma_req->data.wr);
	CU_ASSERT(rqpair.sends_to_post.last == &rdma_req->rsp.wr);
	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources);

	/* Test 2: single SGL WRITE request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> TRANSFERRING_H2C */
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
	CU_ASSERT(rqpair.sends_to_post.first == &rdma_req->data.wr);
	CU_ASSERT(rqpair.sends_to_post.last == &rdma_req->data.wr);
	rqpair.sends_to_post.first = rqpair.sends_to_post.last = NULL;
	STAILQ_INIT(&poller.qpairs_pending_send);
	/* READY_TO_EXECUTE -> EXECUTING */
	rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	/* EXECUTED -> COMPLETING */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
	CU_ASSERT(rdma_req->recv == NULL);
	CU_ASSERT(rqpair.sends_to_post.first == &rdma_req->rsp.wr);
	CU_ASSERT(rqpair.sends_to_post.last == &rdma_req->rsp.wr);
	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources);

	/* Test 3: WRITE+WRITE ibv_send batching */
	{
		struct spdk_nvmf_rdma_recv *recv1, *recv2;
		struct spdk_nvmf_rdma_request *req1, *req2;
		recv1 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req1 = create_req(&rqpair, recv1);
		recv2 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req2 = create_req(&rqpair, recv2);

		/* WRITE 1: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 1;
		spdk_nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
		/* WRITE 1 is the first in batching list */
		CU_ASSERT(rqpair.sends_to_post.first == &req1->data.wr);
		CU_ASSERT(rqpair.sends_to_post.last == &req1->data.wr);

		/* WRITE 2: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 2;
		spdk_nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
		/* WRITE 2 is now also in the batching list */
		CU_ASSERT(rqpair.sends_to_post.first->next == &req2->data.wr);
		CU_ASSERT(rqpair.sends_to_post.last == &req2->data.wr);

		/* Send everything */
		rqpair.sends_to_post.first = rqpair.sends_to_post.last = NULL;
		STAILQ_INIT(&poller.qpairs_pending_send);

		/* WRITE 1 completes before WRITE 2 has finished RDMA reading */
		/* WRITE 1: READY_TO_EXECUTE -> EXECUTING */
		req1->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		spdk_nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 1: EXECUTED -> COMPLETING */
		req1->state = RDMA_REQUEST_STATE_EXECUTED;
		spdk_nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
		CU_ASSERT(rqpair.sends_to_post.first == &req1->rsp.wr);
		CU_ASSERT(rqpair.sends_to_post.last == &req1->rsp.wr);
		rqpair.sends_to_post.first = rqpair.sends_to_post.last = NULL;
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 1: COMPLETED -> FREE */
		req1->state = RDMA_REQUEST_STATE_COMPLETED;
		spdk_nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_FREE);

		/* Now WRITE 2 has finished reading and completes */
		/* WRITE 2: READY_TO_EXECUTE -> EXECUTING */
		req2->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		spdk_nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 2: EXECUTED -> COMPLETING */
		req2->state = RDMA_REQUEST_STATE_EXECUTED;
		spdk_nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
		CU_ASSERT(rqpair.sends_to_post.first == &req2->rsp.wr);
		CU_ASSERT(rqpair.sends_to_post.last == &req2->rsp.wr);
		rqpair.sends_to_post.first = rqpair.sends_to_post.last = NULL;
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 2: COMPLETED -> FREE */
		req2->state = RDMA_REQUEST_STATE_COMPLETED;
		spdk_nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_FREE);

		free_recv(recv1);
		free_req(req1);
		free_recv(recv2);
		free_req(req2);
		poller_reset(&poller, &group);
		qpair_reset(&rqpair, &poller, &device, &resources);
	}

	spdk_mempool_free(rtransport.transport.data_buf_pool);
	spdk_mempool_free(rtransport.data_wr_pool);
}

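/*
 * Verify the round-robin assignment of admin and I/O qpairs to poll groups,
 * including the next_admin_pg/next_io_pg bookkeeping as poll groups are
 * destroyed.
 */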
#define TEST_GROUPS_COUNT 5
static void
test_spdk_nvmf_rdma_get_optimal_poll_group(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_transport *transport = &rtransport.transport;
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_transport_poll_group *result;
	uint32_t i;

	rqpair.qpair.transport = transport;
	pthread_mutex_init(&rtransport.lock, NULL);
	TAILQ_INIT(&rtransport.poll_groups);

	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		groups[i] = spdk_nvmf_rdma_poll_group_create(transport);
		CU_ASSERT(groups[i] != NULL);
		rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
		groups[i]->transport = transport;
	}
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[0]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[0]);

	/* Emulate connection of %TEST_GROUPS_COUNT% initiators - each creates 1 admin and 1 io qp */
	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		rqpair.qpair.qid = 0;
		result = spdk_nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i]);

		rqpair.qpair.qid = 1;
		result = spdk_nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
	}
	/* Wrap around: the admin/io pg pointers point to the first pg again.
	 * Destroy all poll groups except for the last one. */
	for (i = 0; i < TEST_GROUPS_COUNT - 1; i++) {
		spdk_nvmf_rdma_poll_group_destroy(groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[i + 1]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i + 1]);
	}

	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Check that pointers to the next admin/io poll groups are not changed */
	rqpair.qpair.qid = 0;
	result = spdk_nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	rqpair.qpair.qid = 1;
	result = spdk_nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Remove the last poll group, check that pointers are NULL */
	spdk_nvmf_rdma_poll_group_destroy(groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == NULL);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == NULL);

	/* Request optimal poll group, result must be NULL */
	rqpair.qpair.qid = 0;
	result = spdk_nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	rqpair.qpair.qid = 1;
	result = spdk_nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	pthread_mutex_destroy(&rtransport.lock);
}
#undef TEST_GROUPS_COUNT

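/*
 * Same SGL parsing paths as test_spdk_nvmf_rdma_request_parse_sgl(), but with
 * DIF insert/strip enabled: the parsed WR SGEs must account for the per-block
 * metadata for various io_unit_size / block size combinations.
 */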
static void
test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	struct spdk_nvmf_transport_pg_cache_buf buffer;
	struct spdk_nvmf_transport_pg_cache_buf *buffer_ptr;
	const uint32_t data_bs = 512;
	const uint32_t md_size = 8;
	int rc, i;
	void *aligned_buffer;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	STAILQ_INIT(&group.retired_bufs);
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	device.map = NULL;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size, block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 8;
	sgl->keyed.length = data_bs * 4;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 2: simple I/O, one SGL equal to io unit size, io_unit_size is not aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 4;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 5);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == g_rdma_mr.lkey);

	/* 2nd buffer consumed */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == g_rdma_mr.lkey);

	/* Part 3: simple I/O, one SGL equal io unit size, io_unit_size is equal to block size 512 bytes */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs;
	sgl->keyed.length = data_bs;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == data_bs + md_size);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[0].iov_len == data_bs);
	/* 2nd buffer consumed for metadata */
	CU_ASSERT(rdma_req.req.iov[1].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[1].iov_len == md_size);

	/* Part 4: simple I/O, one SGL equal io unit size, io_unit_size is aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->keyed.length = data_bs * 4;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 5: simple I/O, one SGL equal to 2x io unit size, io_unit_size is aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 2;
	sgl->keyed.length = data_bs * 4;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}
	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].length == data_bs);
	}

	/* Part 6: simple I/O, one SGL larger than the transport io unit size, io_unit_size is not aligned to md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 6;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 6);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 6);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 7);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == g_rdma_mr.lkey);

	/* 2nd IO buffer consumed */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == g_rdma_mr.lkey);

	CU_ASSERT(rdma_req.data.wr.sg_list[5].addr == 0x2000 + 24 + md_size);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].lkey == g_rdma_mr.lkey);

	CU_ASSERT(rdma_req.data.wr.sg_list[6].addr == 0x2000 + 24 + 512 + md_size * 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].lkey == g_rdma_mr.lkey);

	/* Part 7: simple I/O, number of SGL entries exceeds the number of entries
	   one WR can hold. Additional WR is chained */
	MOCK_SET(spdk_mempool_get, &data);
	aligned_buffer = (void *)((uintptr_t)((char *)&data + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 16;
	sgl->keyed.length = data_bs * 16;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 16);
	CU_ASSERT(rdma_req.req.data == aligned_buffer);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	/* additional wr from pool */
	CU_ASSERT(rdma_req.data.wr.next == (void *)&data.wr);
	CU_ASSERT(rdma_req.data.wr.next->num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.next->next == &rdma_req.rsp.wr);

	/* Part 8: simple I/O, data with metadata does not fit into one io_buffer */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = 516;
	sgl->keyed.length = data_bs * 2;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 2);
	CU_ASSERT(rdma_req.req.iovcnt == 3);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 2);
	CU_ASSERT(rdma_req.req.data == (void *)0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);

	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	/* 2nd IO buffer consumed, offset by 4 bytes because part of the metadata
	   is located at the beginning of that buffer */
	CU_ASSERT(rdma_req.data.wr.sg_list[1].addr == 0x2000 + 4);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].lkey == g_rdma_mr.lkey);

	/* Part 9: dealing with a buffer split over two Memory Regions */
	MOCK_SET(spdk_mempool_get, (void *)&buffer);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 2;
	g_mr_size = data_bs;
	g_mr_next_size = rtransport.transport.opts.io_unit_size;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uint64_t)rdma_req.req.data + i *
			  (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}
	buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
	CU_ASSERT(buffer_ptr == &buffer);
	STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
	CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
	g_mr_size = 0;
	g_mr_next_size = 0;

	/* Test 2: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, &data);
	aligned_buffer = (void *)((uintptr_t)((char *)&data + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);

	/* part 1: 2 segments each with 1 wr. io_unit_size is aligned with data_bs + md_size */
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1,
			  SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK, 0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);

	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = data_bs * 4;
		sgl_desc[i].address = 0x4000 + i * data_bs * 4;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4 * 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4 * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
			  (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + data_bs * 4);
	CU_ASSERT(data.wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
			  (data_bs + md_size));
		CU_ASSERT(data.wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
}

int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl);
	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_process);
	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_get_optimal_poll_group);
	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl_with_md);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}