/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "common/lib/test_rdma.c"
#include "nvmf/rdma.c"
#include "nvmf/transport.c"

#define RDMA_UT_UNITS_IN_MAX_IO 16

struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
	.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
	.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
	.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
	.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
	.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
	.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
	.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
};

SPDK_LOG_REGISTER_COMPONENT(nvmf)
DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size, uint64_t translation), 0);
DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size), 0);
DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
		nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid), 0);
DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id,
		uint8_t owner_type, uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
		uint32_t size, uint64_t object_id, uint64_t arg1));

DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
		struct spdk_nvmf_ctrlr_data *cdata));
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
		struct spdk_dif_ctx *dif_ctx), false);
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* cast official trstring to uppercase version of input */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

static void reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
{
	int i;

	rdma_req->req.length = 0;
	rdma_req->req.data_from_pool = false;
	rdma_req->req.data = NULL;
	rdma_req->data.wr.num_sge = 0;
	rdma_req->data.wr.wr.rdma.remote_addr = 0;
	rdma_req->data.wr.wr.rdma.rkey = 0;
	memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));

	for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
		rdma_req->req.iov[i].iov_base = 0;
		rdma_req->req.iov[i].iov_len = 0;
		rdma_req->req.buffers[i] = 0;
		rdma_req->data.wr.sg_list[i].addr = 0;
		rdma_req->data.wr.sg_list[i].length = 0;
		rdma_req->data.wr.sg_list[i].lkey = 0;
	}
	rdma_req->req.iovcnt = 0;
}

static void
test_spdk_nvmf_rdma_request_parse_sgl(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvmf_transport_pg_cache_buf bufs[4];
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	int rc, i;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;

	device.map = (void *)0x0;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);

	/* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}

	/* Part 3: simple I/O, one SGL larger than the transport max io size */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 4: Pretend there are no buffer pools */
	MOCK_SET(spdk_mempool_get, NULL);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == false);
	CU_ASSERT(rdma_req.req.data == NULL);
	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
	CU_ASSERT(rdma_req.req.buffers[0] == NULL);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);

	rdma_req.recv->buf = (void *)0xDDDD;
	/* Test 2: sgl type: data block subtype: offset (in capsule data) */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;

	/* Part 1: Normal I/O smaller than in capsule data size, no offset */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
	CU_ASSERT(rdma_req.req.data_from_pool == false);

	/* Part 2: I/O offset + length too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = rtransport.transport.opts.in_capsule_data_size;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 3: I/O too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Test 3: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
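	/* The in-capsule buffer now holds the SGL descriptor list; spdk_mempool_get is
	 * mocked to hand out the local request_data struct, so the WR chained for the
	 * second descriptor can be inspected through data.wr below. */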
	MOCK_SET(spdk_mempool_get, &data);

	/* part 1: 2 segments each with 1 wr. */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
		sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 2: 2 segments, each with 1 wr containing 8 sge_elements */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
		sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 16);
	CU_ASSERT(rdma_req.data.wr.num_sge == 8);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
	CU_ASSERT(data.wr.num_sge == 8);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 3: 2 segments, one very large, one very small */
	reset_nvmf_rdma_request(&rdma_req);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.key = 0x44;
	}

	sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
				   rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[0].address = 0x4000;
	sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
			      rtransport.transport.opts.io_unit_size / 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 17);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
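	/* Segment 1 spans 16 pool buffers (15 full plus one half-filled) and segment 2
	 * uses one more, giving 17 iovecs; the first WR carries 16 SGEs and the chained
	 * WR carries the last one. */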
	for (i = 0; i < 15; i++) {
		CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
	}
	CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
		  rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* Test 4: use PG buffer cache */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;
	sgl->keyed.key = 0xEEEE;

	for (i = 0; i < 4; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}

	/* part 1: use the four buffers from the pg cache */
	group.group.buf_cache_size = 4;
	group.group.buf_cache_count = 4;
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * 4;
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	/* part 2: now that we have used the buffers from the cache, try again. We should get mempool buffers. */
	reset_nvmf_rdma_request(&rdma_req);
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(group.group.buf_cache_count == 0);
	}

	/* part 3: half and half */
	group.group.buf_cache_count = 2;

	for (i = 0; i < 2; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}
	reset_nvmf_rdma_request(&rdma_req);
	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	for (i = 0; i < 2; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}
	for (i = 2; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	reset_nvmf_rdma_request(&rdma_req);
}

static struct spdk_nvmf_rdma_recv *
create_recv(struct spdk_nvmf_rdma_qpair *rqpair, enum spdk_nvme_nvm_opcode opc)
{
	struct spdk_nvmf_rdma_recv *rdma_recv;
	union nvmf_h2c_msg *cmd;
	struct spdk_nvme_sgl_descriptor *sgl;

	rdma_recv = calloc(1, sizeof(*rdma_recv));
	rdma_recv->qpair = rqpair;
	cmd = calloc(1, sizeof(*cmd));
	rdma_recv->sgl[0].addr = (uintptr_t)cmd;
	cmd->nvme_cmd.opc = opc;
	sgl = &cmd->nvme_cmd.dptr.sgl1;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->keyed.length = 1;

	return rdma_recv;
}

static void
free_recv(struct spdk_nvmf_rdma_recv *rdma_recv)
{
	free((void *)rdma_recv->sgl[0].addr);
	free(rdma_recv);
}

static struct spdk_nvmf_rdma_request *
create_req(struct spdk_nvmf_rdma_qpair *rqpair,
	   struct spdk_nvmf_rdma_recv *rdma_recv)
{
	struct spdk_nvmf_rdma_request *rdma_req;
	union nvmf_c2h_msg *cpl;

	rdma_req = calloc(1, sizeof(*rdma_req));
	rdma_req->recv = rdma_recv;
	rdma_req->req.qpair = &rqpair->qpair;
	rdma_req->state = RDMA_REQUEST_STATE_NEW;
	rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
	rdma_req->data.wr.sg_list = rdma_req->data.sgl;
	cpl = calloc(1, sizeof(*cpl));
	rdma_req->rsp.sgl[0].addr = (uintptr_t)cpl;
	rdma_req->req.rsp = cpl;

	return rdma_req;
}

static void
free_req(struct spdk_nvmf_rdma_request *rdma_req)
{
	free((void *)rdma_req->rsp.sgl[0].addr);
	free(rdma_req);
}

static void
qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
	    struct spdk_nvmf_rdma_poller *poller,
	    struct spdk_nvmf_rdma_device *device,
	    struct spdk_nvmf_rdma_resources *resources,
	    struct spdk_nvmf_transport *transport)
{
	memset(rqpair, 0, sizeof(*rqpair));
	STAILQ_INIT(&rqpair->pending_rdma_write_queue);
	STAILQ_INIT(&rqpair->pending_rdma_read_queue);
	rqpair->poller = poller;
	rqpair->device = device;
	rqpair->resources = resources;
	rqpair->qpair.qid = 1;
	rqpair->ibv_state = IBV_QPS_RTS;
	rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	rqpair->max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
	rqpair->max_send_depth = 16;
	rqpair->max_read_depth = 16;
	rqpair->qpair.transport = transport;
	resources->recvs_to_post.first = resources->recvs_to_post.last = NULL;
}

static void
poller_reset(struct spdk_nvmf_rdma_poller *poller,
	     struct spdk_nvmf_rdma_poll_group *group)
{
	memset(poller, 0, sizeof(*poller));
	STAILQ_INIT(&poller->qpairs_pending_recv);
	STAILQ_INIT(&poller->qpairs_pending_send);
	poller->group = group;
}

static void
test_spdk_nvmf_rdma_request_process(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_rdma_poll_group group = {};
	struct spdk_nvmf_rdma_poller poller = {};
	struct spdk_nvmf_rdma_device device = {};
	struct spdk_nvmf_rdma_resources resources = {};
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_rdma_recv *rdma_recv;
	struct spdk_nvmf_rdma_request *rdma_req;
	bool progress;

	STAILQ_INIT(&group.group.buf_cache);
	STAILQ_INIT(&group.group.pending_buf_queue);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.transport.data_buf_pool = spdk_mempool_create("test_data_pool", 16, 128, 0, 0);
	rtransport.data_wr_pool = spdk_mempool_create("test_wr_pool", 128,
				  sizeof(struct spdk_nvmf_rdma_request_data),
				  0, 0);
	MOCK_CLEAR(spdk_mempool_get);

	device.attr.device_cap_flags = 0;
	device.map = (void *)0x0;

	/* Test 1: single SGL READ request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_READ);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> EXECUTING */
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
	/* EXECUTED -> TRANSFERRING_C2H */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(rdma_req->recv == NULL);
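	/* The recv has been detached from the request and queued on recvs_to_post
	 * for re-posting. */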
	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

	/* Test 2: single SGL WRITE request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> TRANSFERRING_H2C */
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
	STAILQ_INIT(&poller.qpairs_pending_send);
	/* READY_TO_EXECUTE -> EXECUTING */
	rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	/* EXECUTED -> COMPLETING */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
	CU_ASSERT(rdma_req->recv == NULL);
	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);

	/* Test 3: WRITE+WRITE ibv_send batching */
	{
		struct spdk_nvmf_rdma_recv *recv1, *recv2;
		struct spdk_nvmf_rdma_request *req1, *req2;
		recv1 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req1 = create_req(&rqpair, recv1);
		recv2 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req2 = create_req(&rqpair, recv2);

		/* WRITE 1: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 1;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);

		/* WRITE 2: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 2;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);

		STAILQ_INIT(&poller.qpairs_pending_send);

		/* WRITE 1 completes before WRITE 2 has finished RDMA reading */
		/* WRITE 1: READY_TO_EXECUTE -> EXECUTING */
		req1->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 1: EXECUTED -> COMPLETING */
		req1->state = RDMA_REQUEST_STATE_EXECUTED;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 1: COMPLETED -> FREE */
		req1->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_FREE);

		/* Now WRITE 2 has finished reading and completes */
		/* WRITE 2: READY_TO_EXECUTE -> EXECUTING */
		req2->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 2: EXECUTED -> COMPLETING */
		req2->state = RDMA_REQUEST_STATE_EXECUTED;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 2: COMPLETED -> FREE */
		req2->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_FREE);

		free_recv(recv1);
		free_req(req1);
		free_recv(recv2);
		free_req(req2);
		poller_reset(&poller, &group);
		qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
	}

	/* Test 4: invalid command, check xfer type */
	{
		struct spdk_nvmf_rdma_recv *rdma_recv_inv;
		struct spdk_nvmf_rdma_request *rdma_req_inv;
		/* construct an opcode that specifies BIDIRECTIONAL transfer */
		uint8_t opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;

		rdma_recv_inv = create_recv(&rqpair, opc);
		rdma_req_inv = create_req(&rqpair, rdma_recv_inv);

		/* NEW -> RDMA_REQUEST_STATE_COMPLETING */
		rqpair.current_recv_depth = 1;
		progress = nvmf_rdma_request_process(&rtransport, rdma_req_inv);
		CU_ASSERT(progress == true);
		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_COMPLETING);
		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
		CU_ASSERT(rdma_req_inv->req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);

		/* RDMA_REQUEST_STATE_COMPLETED -> FREE */
		rdma_req_inv->state = RDMA_REQUEST_STATE_COMPLETED;
		nvmf_rdma_request_process(&rtransport, rdma_req_inv);
		CU_ASSERT(rdma_req_inv->state == RDMA_REQUEST_STATE_FREE);

		free_recv(rdma_recv_inv);
		free_req(rdma_req_inv);
		poller_reset(&poller, &group);
		qpair_reset(&rqpair, &poller, &device, &resources, &rtransport.transport);
	}

	spdk_mempool_free(rtransport.transport.data_buf_pool);
	spdk_mempool_free(rtransport.data_wr_pool);
}

#define TEST_GROUPS_COUNT 5
static void
test_nvmf_rdma_get_optimal_poll_group(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_transport *transport = &rtransport.transport;
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_transport_poll_group *result;
	uint32_t i;

	rqpair.qpair.transport = transport;
	pthread_mutex_init(&rtransport.lock, NULL);
	TAILQ_INIT(&rtransport.poll_groups);

	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		groups[i] = nvmf_rdma_poll_group_create(transport);
		CU_ASSERT(groups[i] != NULL);
		rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
		groups[i]->transport = transport;
	}
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[0]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[0]);

	/* Emulate connection of %TEST_GROUPS_COUNT% initiators - each creates 1 admin and 1 io qp */
	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
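		/* An admin qpair (qid 0) advances only next_admin_pg; the io qpair
		 * (qid 1) below advances only next_io_pg. */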
		rqpair.qpair.qid = 0;
		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i]);

		rqpair.qpair.qid = 1;
		result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
	}
	/* Wrap around: the admin/io pg pointers point to the first pg again.
	 * Destroy all poll groups except the last one. */
	for (i = 0; i < TEST_GROUPS_COUNT - 1; i++) {
		nvmf_rdma_poll_group_destroy(groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[i + 1]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i + 1]);
	}

	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Check that pointers to the next admin/io poll groups are not changed */
	rqpair.qpair.qid = 0;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	rqpair.qpair.qid = 1;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Remove the last poll group, check that pointers are NULL */
	nvmf_rdma_poll_group_destroy(groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == NULL);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == NULL);

	/* Request optimal poll group, result must be NULL */
	rqpair.qpair.qid = 0;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	rqpair.qpair.qid = 1;
	result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	pthread_mutex_destroy(&rtransport.lock);
}
#undef TEST_GROUPS_COUNT

static void
test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	char data2_buffer[8192];
	struct spdk_nvmf_rdma_request_data *data2 = (struct spdk_nvmf_rdma_request_data *)data2_buffer;
	const uint32_t data_bs = 512;
	const uint32_t md_size = 8;
	int rc, i;
	void *aligned_buffer;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	device.map = NULL;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size, block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 8;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}

	/* Part 2: simple I/O, one SGL equal to io unit size, io_unit_size is not aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 5);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}
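	/* io_unit_size (4 * 512) is not a multiple of the extended block size (512 + 8),
	 * so only 488 bytes of the 4th data block fit in the first buffer; the remaining
	 * 24 bytes spill into the second buffer, hence the 5th SGE. */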
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == RDMA_UT_LKEY);

	/* 2nd buffer consumed */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == RDMA_UT_LKEY);

	/* Part 3: simple I/O, one SGL equal to io unit size, io_unit_size is equal to block size 512 bytes */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs;
	sgl->keyed.length = data_bs;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == data_bs + md_size);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);

	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[0].iov_len == data_bs);
	/* 2nd buffer consumed for metadata */
	CU_ASSERT(rdma_req.req.iov[1].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[1].iov_len == md_size);

	/* Part 4: simple I/O, one SGL equal to io unit size, io_unit_size is aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}

	/* Part 5: simple I/O, one SGL equal to 2x io unit size, io_unit_size is aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 2;
	sgl->keyed.length = data_bs * 4;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}
	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].length == data_bs);
	}

	/* Part 6: simple I/O, one SGL larger than the transport io unit size, io_unit_size is not aligned to md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 6;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 6);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 6);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 7);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == RDMA_UT_LKEY);
	}
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == RDMA_UT_LKEY);

	/* 2nd IO buffer consumed */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == RDMA_UT_LKEY);
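
	/* Blocks 4 and 5 are laid out in the 2nd buffer; each addr skips the
	 * metadata of the blocks written before it. */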
	CU_ASSERT(rdma_req.data.wr.sg_list[5].addr == 0x2000 + 24 + md_size);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].lkey == RDMA_UT_LKEY);

	CU_ASSERT(rdma_req.data.wr.sg_list[6].addr == 0x2000 + 24 + 512 + md_size * 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].lkey == RDMA_UT_LKEY);

	/* Part 7: simple I/O, number of SGL entries exceeds the number of entries
	   one WR can hold. Additional WR is chained */
	MOCK_SET(spdk_mempool_get, data2_buffer);
	aligned_buffer = (void *)((uintptr_t)(data2_buffer + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 16;
	sgl->keyed.length = data_bs * 16;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 16);
	CU_ASSERT(rdma_req.req.data == aligned_buffer);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	/* additional wr from pool */
	CU_ASSERT(rdma_req.data.wr.next == (void *)&data2->wr);
	CU_ASSERT(rdma_req.data.wr.next->num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.next->next == &rdma_req.rsp.wr);

	/* Part 8: simple I/O, data with metadata does not fit into 1 io_buffer */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = 516;
	sgl->keyed.length = data_bs * 2;

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 2);
	CU_ASSERT(rdma_req.req.iovcnt == 3);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 2);
	CU_ASSERT(rdma_req.req.data == (void *)0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);

	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == RDMA_UT_LKEY);

	/* 2nd IO buffer consumed, offset by 4 bytes because part of the metadata
	   lands at the beginning of that buffer */
	CU_ASSERT(rdma_req.data.wr.sg_list[1].addr == 0x2000 + 4);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].lkey == RDMA_UT_LKEY);

	/* Test 2: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, &data);
	aligned_buffer = (void *)((uintptr_t)((char *)&data + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);

	/* part 1: 2 segments each with 1 wr. io_unit_size is aligned with data_bs + md_size */
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1,
			  SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK, 0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);

	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = data_bs * 4;
		sgl_desc[i].address = 0x4000 + i * data_bs * 4;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4 * 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4 * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
			  (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + data_bs * 4);
	CU_ASSERT(data.wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
			  (data_bs + md_size));
		CU_ASSERT(data.wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
}

int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl);
	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_process);
	CU_ADD_TEST(suite, test_nvmf_rdma_get_optimal_poll_group);
	CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl_with_md);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}