/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "common/lib/test_rdma.c"
#include "nvmf/rdma.c"
#include "nvmf/transport.c"

uint64_t g_mr_size;
uint64_t g_mr_next_size;
struct ibv_mr g_rdma_mr;

#define RDMA_UT_UNITS_IN_MAX_IO 16

struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
	.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
	.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
	.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
	.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
	.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
	.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
	.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
};

SPDK_LOG_REGISTER_COMPONENT(nvmf)
DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size, uint64_t translation), 0);
DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size), 0);
DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
		nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid), 0);
DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type, uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));

DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
		struct spdk_nvmf_ctrlr_data *cdata));
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
		struct spdk_dif_ctx *dif_ctx), false);
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Store the uppercase version of the input as the official trstring. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

uint64_t
spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
{
	if (g_mr_size != 0) {
		*size = g_mr_size;
		if (g_mr_next_size != 0) {
			g_mr_size = g_mr_next_size;
		}
	}

	return (uint64_t)&g_rdma_mr;
}

static void reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
{
	int i;

	rdma_req->req.length = 0;
	rdma_req->req.data_from_pool = false;
	rdma_req->req.data = NULL;
	rdma_req->data.wr.num_sge = 0;
	rdma_req->data.wr.wr.rdma.remote_addr = 0;
	rdma_req->data.wr.wr.rdma.rkey = 0;
	memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));

	for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
		rdma_req->req.iov[i].iov_base = 0;
		rdma_req->req.iov[i].iov_len = 0;
		rdma_req->req.buffers[i] = 0;
		rdma_req->data.wr.sg_list[i].addr = 0;
		rdma_req->data.wr.sg_list[i].length = 0;
		rdma_req->data.wr.sg_list[i].lkey = 0;
	}
	rdma_req->req.iovcnt = 0;
}

static void
test_spdk_nvmf_rdma_request_parse_sgl(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvmf_transport_pg_cache_buf bufs[4];
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	struct spdk_nvmf_transport_pg_cache_buf buffer;
	struct spdk_nvmf_transport_pg_cache_buf *buffer_ptr;
	int rc, i;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	STAILQ_INIT(&group.retired_bufs);
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;

	device.map = (void *)0x0;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	/* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 3: simple I/O, one SGL larger than the transport max io size */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 4: Pretend there are no buffer pools */
	MOCK_SET(spdk_mempool_get, NULL);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == false);
	CU_ASSERT(rdma_req.req.data == NULL);
	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
	CU_ASSERT(rdma_req.req.buffers[0] == NULL);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);

	rdma_req.recv->buf = (void *)0xDDDD;
	/* Test 2: sgl type: data block subtype: offset (in capsule data) */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;

	/* Part 1: Normal I/O smaller than in capsule data size, no offset */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
	CU_ASSERT(rdma_req.req.data_from_pool == false);

	/* Part 2: I/O offset + length too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = rtransport.transport.opts.in_capsule_data_size;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 3: I/O too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Test 3: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, &data);

	/* part 1: 2 segments each with 1 wr. */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
		sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 2: 2 segments, each with 1 wr containing 8 sge_elements */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
		sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 16);
	CU_ASSERT(rdma_req.data.wr.num_sge == 8);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
	CU_ASSERT(data.wr.num_sge == 8);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 3: 2 segments, one very large, one very small */
	reset_nvmf_rdma_request(&rdma_req);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.key = 0x44;
	}

	sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
				   rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[0].address = 0x4000;
	sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
			      rtransport.transport.opts.io_unit_size / 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 17);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	for (i = 0; i < 15; i++) {
		CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
	}
	CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
		  rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* Test 4: use PG buffer cache */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;

	for (i = 0; i < 4; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}

	/* part 1: use the four buffers from the pg cache */
	group.group.buf_cache_size = 4;
	group.group.buf_cache_count = 4;
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * 4;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	/* part 2: now that we have used the buffers from the cache, try again. We should get mempool buffers. */
	reset_nvmf_rdma_request(&rdma_req);
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(group.group.buf_cache_count == 0);
	}

	/* part 3: half and half */
	group.group.buf_cache_count = 2;

	for (i = 0; i < 2; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}
	reset_nvmf_rdma_request(&rdma_req);
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	for (i = 0; i < 2; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}
	for (i = 2; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	reset_nvmf_rdma_request(&rdma_req);
	/* Test 5: dealing with a buffer split over two Memory Regions */
	MOCK_SET(spdk_mempool_get, (void *)&buffer);
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;
	g_mr_size = rtransport.transport.opts.io_unit_size / 4;
	g_mr_next_size = rtransport.transport.opts.io_unit_size / 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
	buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
	CU_ASSERT(buffer_ptr == &buffer);
	STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
	CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
	g_mr_size = 0;
	g_mr_next_size = 0;

	reset_nvmf_rdma_request(&rdma_req);
}

static struct spdk_nvmf_rdma_recv *
create_recv(struct spdk_nvmf_rdma_qpair *rqpair, enum spdk_nvme_nvm_opcode opc)
{
	struct spdk_nvmf_rdma_recv *rdma_recv;
	union nvmf_h2c_msg *cmd;
	struct spdk_nvme_sgl_descriptor *sgl;

	rdma_recv = calloc(1, sizeof(*rdma_recv));
	rdma_recv->qpair = rqpair;
	cmd = calloc(1, sizeof(*cmd));
	rdma_recv->sgl[0].addr = (uintptr_t)cmd;
	cmd->nvme_cmd.opc = opc;
	sgl = &cmd->nvme_cmd.dptr.sgl1;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->keyed.length = 1;

	return rdma_recv;
}

static void
free_recv(struct spdk_nvmf_rdma_recv *rdma_recv)
{
	free((void *)rdma_recv->sgl[0].addr);
	free(rdma_recv);
}

static struct spdk_nvmf_rdma_request *
create_req(struct spdk_nvmf_rdma_qpair *rqpair,
	   struct spdk_nvmf_rdma_recv *rdma_recv)
{
	struct spdk_nvmf_rdma_request *rdma_req;
	union nvmf_c2h_msg *cpl;

	rdma_req = calloc(1, sizeof(*rdma_req));
	rdma_req->recv = rdma_recv;
	rdma_req->req.qpair = &rqpair->qpair;
	rdma_req->state = RDMA_REQUEST_STATE_NEW;
	rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
	rdma_req->data.wr.sg_list = rdma_req->data.sgl;
	cpl = calloc(1, sizeof(*cpl));
	rdma_req->rsp.sgl[0].addr = (uintptr_t)cpl;
	rdma_req->req.rsp = cpl;

	return rdma_req;
}

static void
free_req(struct spdk_nvmf_rdma_request *rdma_req)
{
	free((void *)rdma_req->rsp.sgl[0].addr);
	free(rdma_req);
}

static void
qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
	    struct spdk_nvmf_rdma_poller *poller,
	    struct spdk_nvmf_rdma_device *device,
	    struct spdk_nvmf_rdma_resources *resources,
	    struct spdk_nvmf_transport *transport)
{
	memset(rqpair, 0, sizeof(*rqpair));
	STAILQ_INIT(&rqpair->pending_rdma_write_queue);
	STAILQ_INIT(&rqpair->pending_rdma_read_queue);
	rqpair->poller = poller;
	rqpair->device = device;
	rqpair->resources = resources;
	rqpair->qpair.qid = 1;
	rqpair->ibv_state = IBV_QPS_RTS;
	rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	rqpair->max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
	rqpair->max_send_depth = 16;
	rqpair->max_read_depth = 16;
	rqpair->qpair.transport = transport;
	resources->recvs_to_post.first = resources->recvs_to_post.last = NULL;
}

static void
poller_reset(struct spdk_nvmf_rdma_poller *poller,
	     struct spdk_nvmf_rdma_poll_group *group)
{
	memset(poller, 0, sizeof(*poller));
	STAILQ_INIT(&poller->qpairs_pending_recv);
	STAILQ_INIT(&poller->qpairs_pending_send);
	poller->group = group;
}

static void
test_spdk_nvmf_rdma_request_process(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_rdma_poll_group group = {};
	struct spdk_nvmf_rdma_poller poller = {};
	struct spdk_nvmf_rdma_device device = {};
	struct spdk_nvmf_rdma_resources resources = {};
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_rdma_recv *rdma_recv;
	struct spdk_nvmf_rdma_request *rdma_req;
	bool progress;

	STAILQ_INIT(&group.group.buf_cache);
	STAILQ_INIT(&group.group.pending_buf_queue);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.transport.data_buf_pool = spdk_mempool_create("test_data_pool", 16, 128, 0, 0);
	rtransport.data_wr_pool = spdk_mempool_create("test_wr_pool", 128,
				  sizeof(struct spdk_nvmf_rdma_request_data),
				  0, 0);
	MOCK_CLEAR(spdk_mempool_get);

	device.attr.device_cap_flags = 0;
	device.map = (void *)0x0;
	g_rdma_mr.lkey = 0xABCD;

	/* Test 1: single SGL READ request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_READ);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> EXECUTING */
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
	/* EXECUTED -> TRANSFERRING_C2H */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(rdma_req->recv == NULL);
	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

	/* Test 2: single SGL WRITE request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> TRANSFERRING_H2C */
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
	STAILQ_INIT(&poller.qpairs_pending_send);
	/* READY_TO_EXECUTE -> EXECUTING */
	rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	/* EXECUTED -> COMPLETING */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
	CU_ASSERT(rdma_req->recv == NULL);
	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

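	/* Clean up the request objects and reset the poller/qpair before the next test case. */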
	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

	/* Test 3: WRITE+WRITE ibv_send batching */
	{
		struct spdk_nvmf_rdma_recv *recv1, *recv2;
		struct spdk_nvmf_rdma_request *req1, *req2;
		recv1 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req1 = create_req(&rqpair, recv1);
		recv2 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req2 = create_req(&rqpair, recv2);

		/* WRITE 1: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 1;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);

		/* WRITE 2: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 2;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);

		STAILQ_INIT(&poller.qpairs_pending_send);

		/* WRITE 1 completes before WRITE 2 has finished RDMA reading */
		/* WRITE 1: READY_TO_EXECUTE -> EXECUTING */
		req1->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 1: EXECUTED -> COMPLETING */
		req1->state = RDMA_REQUEST_STATE_EXECUTED;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 1: COMPLETED -> FREE */
		req1->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_FREE);

		/* Now WRITE 2 has finished reading and completes */
		/* WRITE 2: READY_TO_EXECUTE -> EXECUTING */
		req2->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 2: EXECUTED -> COMPLETING */
		req2->state = RDMA_REQUEST_STATE_EXECUTED;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 2: COMPLETED -> FREE */
		req2->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_FREE);

		free_recv(recv1);
		free_req(req1);
		free_recv(recv2);
		free_req(req2);
		poller_reset(&poller, &group);
		qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
	}

	/* Test 4: invalid command, check xfer type */
	{
		struct spdk_nvmf_rdma_recv *rdma_recv_inv;
		struct spdk_nvmf_rdma_request *rdma_req_inv;
		/* construct an opcode that specifies BIDIRECTIONAL transfer */
		uint8_t opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;

		rdma_recv_inv = create_recv(&rqpair, opc);
		rdma_req_inv = create_req(&rqpair, rdma_recv_inv);

		/* NEW -> RDMA_REQUEST_STATE_COMPLETING */
		rqpair.current_recv_depth = 1;
		progress = nvmf_rdma_request_process(&rtransport, rdma_req_inv);
		CU_ASSERT(progress == true);
		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_COMPLETING);
		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);

		/* RDMA_REQUEST_STATE_COMPLETED -> FREE */
		rdma_req_inv->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, rdma_req_inv);
		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_FREE);

		free_recv(rdma_recv_inv);
		free_req(rdma_req_inv);
		poller_reset(&poller, &group);
		qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
	}

	spdk_mempool_free(rtransport.transport.data_buf_pool);
	spdk_mempool_free(rtransport.data_wr_pool);
}

#define TEST_GROUPS_COUNT 5
static void
test_nvmf_rdma_get_optimal_poll_group(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_transport *transport = &rtransport.transport;
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_transport_poll_group *result;
	uint32_t i;

	rqpair.qpair.transport = transport;
	pthread_mutex_init(&rtransport.lock, NULL);
	TAILQ_INIT(&rtransport.poll_groups);

	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		groups[i] = nvmf_rdma_poll_group_create(transport);
		CU_ASSERT(groups[i] != NULL);
		rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
		groups[i]->transport = transport;
	}
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[0]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[0]);

	/* Emulate connection of %TEST_GROUPS_COUNT% initiators - each creates 1 admin and 1 io qp */
	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		rqpair.qpair.qid = 0;
		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i]);

		rqpair.qpair.qid = 1;
		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
	}
	/* Wrap around: admin/io pg point to the first pg.
	   Destroy all poll groups except the last one */
	for (i = 0; i < TEST_GROUPS_COUNT - 1; i++) {
		nvmf_rdma_poll_group_destroy(groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[i + 1]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i + 1]);
	}

	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Check that pointers to the next admin/io poll groups are not changed */
	rqpair.qpair.qid = 0;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	rqpair.qpair.qid = 1;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Remove the last poll group, check that pointers are NULL */
	nvmf_rdma_poll_group_destroy(groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == NULL);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == NULL);

	/* Request optimal poll group, result must be NULL */
	rqpair.qpair.qid = 0;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	rqpair.qpair.qid = 1;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	pthread_mutex_destroy(&rtransport.lock);
}
#undef TEST_GROUPS_COUNT

static void
test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	char data2_buffer[8192];
	struct spdk_nvmf_rdma_request_data *data2 = (struct spdk_nvmf_rdma_request_data *)data2_buffer;
	struct spdk_nvmf_transport_pg_cache_buf buffer;
	struct spdk_nvmf_transport_pg_cache_buf *buffer_ptr;
	const uint32_t data_bs = 512;
	const uint32_t md_size = 8;
	int rc, i;
	void *aligned_buffer;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	STAILQ_INIT(&group.retired_bufs);
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	device.map = NULL;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size, block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 8;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 2: simple I/O, one SGL equal to io unit size, io_unit_size is not aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 5);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == g_rdma_mr.lkey);

	/* 2nd buffer consumed */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == g_rdma_mr.lkey);

	/* Part 3: simple I/O, one SGL equal to io unit size, io_unit_size is equal to block size 512 bytes */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs;
	sgl->keyed.length = data_bs;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == data_bs + md_size);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

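	/* The single data block fits in one SGE; its metadata spills into a second buffer (see iov[1] below). */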
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[0].iov_len == data_bs);
	/* 2nd buffer consumed for metadata */
	CU_ASSERT(rdma_req.req.iov[1].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[1].iov_len == md_size);

	/* Part 4: simple I/O, one SGL equal to io unit size, io_unit_size is aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 5: simple I/O, one SGL equal to 2x io unit size, io_unit_size is aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 2;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}
	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].length == data_bs);
	}

	/* Part 6: simple I/O, one SGL larger than the transport io unit size, io_unit_size is not aligned to md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 6;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 6);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 6);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 7);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == g_rdma_mr.lkey);

	/* 2nd IO buffer consumed */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == g_rdma_mr.lkey);

	CU_ASSERT(rdma_req.data.wr.sg_list[5].addr == 0x2000 + 24 + md_size);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].lkey == g_rdma_mr.lkey);

	CU_ASSERT(rdma_req.data.wr.sg_list[6].addr == 0x2000 + 24 + 512 + md_size * 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].lkey == g_rdma_mr.lkey);

	/* Part 7: simple I/O, number of SGL entries exceeds the number of entries
	   one WR can hold. Additional WR is chained */
	MOCK_SET(spdk_mempool_get, data2_buffer);
	aligned_buffer = (void *)((uintptr_t)(data2_buffer + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 16;
	sgl->keyed.length = data_bs * 16;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 16);
	CU_ASSERT(rdma_req.req.data == aligned_buffer);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	/* additional wr from pool */
	CU_ASSERT(rdma_req.data.wr.next == (void *)&data2->wr);
	CU_ASSERT(rdma_req.data.wr.next->num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.next->next == &rdma_req.rsp.wr);

	/* Part 8: simple I/O, data with metadata does not fit into one io_buffer */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = 516;
	sgl->keyed.length = data_bs * 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 2);
	CU_ASSERT(rdma_req.req.iovcnt == 3);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 2);
	CU_ASSERT(rdma_req.req.data == (void *)0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);

	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	/* 2nd IO buffer consumed, offset by 4 bytes because part of the metadata
	   is located at the beginning of that buffer */
	CU_ASSERT(rdma_req.data.wr.sg_list[1].addr == 0x2000 + 4);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].lkey == g_rdma_mr.lkey);

	/* Test 9: dealing with a buffer split over two Memory Regions */
	MOCK_SET(spdk_mempool_get, (void *)&buffer);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 2;
	g_mr_size = data_bs;
	g_mr_next_size = rtransport.transport.opts.io_unit_size;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uint64_t)rdma_req.req.data + i *
			  (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}
	buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
	CU_ASSERT(buffer_ptr == &buffer);
	STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
	CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
	g_mr_size = 0;
	g_mr_next_size = 0;

	/* Test 2: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, &data);
	aligned_buffer = (void *)((uintptr_t)((char *)&data + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);

	/* part 1: 2 segments each with 1 wr. io_unit_size is aligned with data_bs + md_size */
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1,
			  SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK, 0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);

	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = data_bs * 4;
		sgl_desc[i].address = 0x4000 + i * data_bs * 4;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4 * 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4 * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
			  (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + data_bs * 4);
	CU_ASSERT(data.wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
			  (data_bs + md_size));
		CU_ASSERT(data.wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
}

int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl);
	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_process);
	CU_ADD_TEST(suite, test_nvmf_rdma_get_optimal_poll_group);
	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl_with_md);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}