/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "nvmf/rdma.c"
#include "nvmf/transport.c"

uint64_t g_mr_size;
uint64_t g_mr_next_size;
struct ibv_mr g_rdma_mr;

#define RDMA_UT_UNITS_IN_MAX_IO 16

struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
	.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
	.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
	.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
	.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
	.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
	.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
	.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
};

const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp;
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc;

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size, uint64_t translation), 0);
DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size), 0);
DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
		nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type, uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
		uint32_t size, uint64_t object_id, uint64_t arg1));

DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(spdk_nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
		struct spdk_dif_ctx *dif_ctx), false);
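
/*
 * spdk_mem_map_translate() is re-implemented here (rather than stubbed) so the
 * tests can simulate a buffer that spans two memory regions: when g_mr_size is
 * non-zero the reported translation size is overridden with it, and a non-zero
 * g_mr_next_size becomes the size reported on the following lookup. Every
 * translation resolves to g_rdma_mr.
 */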
uint64_t
spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
{
	if (g_mr_size != 0) {
		*(uint32_t *)size = g_mr_size;
		if (g_mr_next_size != 0) {
			g_mr_size = g_mr_next_size;
		}
	}

	return (uint64_t)&g_rdma_mr;
}

static void reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
{
	int i;

	rdma_req->req.length = 0;
	rdma_req->req.data_from_pool = false;
	rdma_req->req.data = NULL;
	rdma_req->data.wr.num_sge = 0;
	rdma_req->data.wr.wr.rdma.remote_addr = 0;
	rdma_req->data.wr.wr.rdma.rkey = 0;
	memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));

	for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
		rdma_req->req.iov[i].iov_base = 0;
		rdma_req->req.iov[i].iov_len = 0;
		rdma_req->req.buffers[i] = 0;
		rdma_req->data.wr.sg_list[i].addr = 0;
		rdma_req->data.wr.sg_list[i].length = 0;
		rdma_req->data.wr.sg_list[i].lkey = 0;
	}
	rdma_req->req.iovcnt = 0;
}

static void
test_spdk_nvmf_rdma_request_parse_sgl(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvmf_transport_pg_cache_buf bufs[4];
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	struct spdk_nvmf_transport_pg_cache_buf buffer;
	struct spdk_nvmf_transport_pg_cache_buf *buffer_ptr;
	int rc, i;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	STAILQ_INIT(&group.retired_bufs);
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

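	/*
	 * The cases below call spdk_nvmf_rdma_request_parse_sgl() directly with
	 * hand-built SGL descriptors: a keyed data block (Test 1), an in-capsule
	 * offset SGL (Test 2), a multi-descriptor last segment (Test 3), poll-group
	 * buffer-cache reuse (Test 4) and a buffer split across memory regions (Test 5).
	 */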
	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;

	device.map = (void *)0x0;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	/* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 3: simple I/O, one SGL larger than the transport max io size */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 4: Pretend there are no buffer pools */
	MOCK_SET(spdk_mempool_get, NULL);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == false);
	CU_ASSERT(rdma_req.req.data == NULL);
	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
	CU_ASSERT(rdma_req.req.buffers[0] == NULL);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);

	rdma_req.recv->buf = (void *)0xDDDD;
	/* Test 2: sgl type: data block subtype: offset (in capsule data) */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;

	/* Part 1: Normal I/O smaller than in capsule data size, no offset */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
	CU_ASSERT(rdma_req.req.data_from_pool == false);

	/* Part 2: I/O offset + length too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = rtransport.transport.opts.in_capsule_data_size;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 3: I/O too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

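	/*
	 * For the multi-SGL cases below, the capsule (recv->buf) holds an array of
	 * keyed SGL descriptors and sgl1 is a LAST_SEGMENT descriptor pointing at it.
	 * Each descriptor is expected to produce its own RDMA work request; extra WR
	 * containers are drawn via spdk_mempool_get(), mocked here to return &data,
	 * so the chain should end up as rdma_req.data.wr -> data.wr -> rdma_req.rsp.wr.
	 */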
	/* Test 3: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, &data);

	/* part 1: 2 segments each with 1 wr. */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
		sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 2: 2 segments, each with 1 wr containing 8 sge_elements */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
		sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 16);
	CU_ASSERT(rdma_req.data.wr.num_sge == 8);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
	CU_ASSERT(data.wr.num_sge == 8);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 3: 2 segments, one very large, one very small */
	reset_nvmf_rdma_request(&rdma_req);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.key = 0x44;
	}

	sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
				   rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[0].address = 0x4000;
	sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
			      rtransport.transport.opts.io_unit_size / 2;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 17);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	for (i = 0; i < 15; i++) {
		CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
	}
	CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
		  rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* Test 4: use PG buffer cache */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;

	for (i = 0; i < 4; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}

	/* part 1: use the four buffers from the pg cache */
	group.group.buf_cache_size = 4;
	group.group.buf_cache_count = 4;
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * 4;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	/* part 2: now that we have used the buffers from the cache, try again. We should get mempool buffers. */
	reset_nvmf_rdma_request(&rdma_req);
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(group.group.buf_cache_count == 0);
	}

	/* part 3: half and half */
	group.group.buf_cache_count = 2;

	for (i = 0; i < 2; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}
	reset_nvmf_rdma_request(&rdma_req);
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	for (i = 0; i < 2; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}
	for (i = 2; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	reset_nvmf_rdma_request(&rdma_req);
	/* Test 5: a buffer split over two memory regions */
	MOCK_SET(spdk_mempool_get, (void *)&buffer);
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;
	g_mr_size = rtransport.transport.opts.io_unit_size / 4;
	g_mr_next_size = rtransport.transport.opts.io_unit_size / 2;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
	buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
	CU_ASSERT(buffer_ptr == &buffer);
	STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
	CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
	g_mr_size = 0;
	g_mr_next_size = 0;

	reset_nvmf_rdma_request(&rdma_req);
}
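
/*
 * Helpers for the request state-machine test below: create_recv()/create_req()
 * allocate a minimal recv/request pair wired to the given qpair (the command and
 * completion buffers are heap-allocated and released by free_recv()/free_req()),
 * while qpair_reset() and poller_reset() return the fake qpair and poller to a
 * clean, RTS/active state between test cases.
 */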
static struct spdk_nvmf_rdma_recv *
create_recv(struct spdk_nvmf_rdma_qpair *rqpair, enum spdk_nvme_nvm_opcode opc)
{
	struct spdk_nvmf_rdma_recv *rdma_recv;
	union nvmf_h2c_msg *cmd;
	struct spdk_nvme_sgl_descriptor *sgl;

	rdma_recv = calloc(1, sizeof(*rdma_recv));
	rdma_recv->qpair = rqpair;
	cmd = calloc(1, sizeof(*cmd));
	rdma_recv->sgl[0].addr = (uintptr_t)cmd;
	cmd->nvme_cmd.opc = opc;
	sgl = &cmd->nvme_cmd.dptr.sgl1;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->keyed.length = 1;

	return rdma_recv;
}

static void
free_recv(struct spdk_nvmf_rdma_recv *rdma_recv)
{
	free((void *)rdma_recv->sgl[0].addr);
	free(rdma_recv);
}

static struct spdk_nvmf_rdma_request *
create_req(struct spdk_nvmf_rdma_qpair *rqpair,
	   struct spdk_nvmf_rdma_recv *rdma_recv)
{
	struct spdk_nvmf_rdma_request *rdma_req;
	union nvmf_c2h_msg *cpl;

	rdma_req = calloc(1, sizeof(*rdma_req));
	rdma_req->recv = rdma_recv;
	rdma_req->req.qpair = &rqpair->qpair;
	rdma_req->state = RDMA_REQUEST_STATE_NEW;
	rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
	rdma_req->data.wr.sg_list = rdma_req->data.sgl;
	cpl = calloc(1, sizeof(*cpl));
	rdma_req->rsp.sgl[0].addr = (uintptr_t)cpl;
	rdma_req->req.rsp = cpl;

	return rdma_req;
}

static void
free_req(struct spdk_nvmf_rdma_request *rdma_req)
{
	free((void *)rdma_req->rsp.sgl[0].addr);
	free(rdma_req);
}

static void
qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
	    struct spdk_nvmf_rdma_poller *poller,
	    struct spdk_nvmf_rdma_port *port,
	    struct spdk_nvmf_rdma_resources *resources)
{
	memset(rqpair, 0, sizeof(*rqpair));
	STAILQ_INIT(&rqpair->pending_rdma_write_queue);
	STAILQ_INIT(&rqpair->pending_rdma_read_queue);
	rqpair->poller = poller;
	rqpair->port = port;
	rqpair->resources = resources;
	rqpair->qpair.qid = 1;
	rqpair->ibv_state = IBV_QPS_RTS;
	rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	rqpair->max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
	rqpair->max_send_depth = 16;
	rqpair->max_read_depth = 16;
	resources->recvs_to_post.first = resources->recvs_to_post.last = NULL;
}

static void
poller_reset(struct spdk_nvmf_rdma_poller *poller,
	     struct spdk_nvmf_rdma_poll_group *group)
{
	memset(poller, 0, sizeof(*poller));
	STAILQ_INIT(&poller->qpairs_pending_recv);
	STAILQ_INIT(&poller->qpairs_pending_send);
	poller->group = group;
}
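
/*
 * Walks spdk_nvmf_rdma_request_process() through the request state machine by
 * setting rdma_req->state by hand between calls (NEW -> TRANSFERRING/EXECUTING ->
 * EXECUTED -> COMPLETING/COMPLETED -> FREE) and checking which send/recv WRs get
 * queued on the qpair at each transition. Real mempools back data_buf_pool and
 * data_wr_pool here, so MOCK_CLEAR(spdk_mempool_get) restores the real function.
 */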
static void
test_spdk_nvmf_rdma_request_process(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_rdma_poll_group group = {};
	struct spdk_nvmf_rdma_poller poller = {};
	struct spdk_nvmf_rdma_port port = {};
	struct spdk_nvmf_rdma_device device = {};
	struct spdk_nvmf_rdma_resources resources = {};
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_rdma_recv *rdma_recv;
	struct spdk_nvmf_rdma_request *rdma_req;
	bool progress;

	STAILQ_INIT(&group.group.buf_cache);
	STAILQ_INIT(&group.group.pending_buf_queue);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	port.device = &device;
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &port, &resources);

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.transport.data_buf_pool = spdk_mempool_create("test_data_pool", 16, 128, 0, 0);
	rtransport.data_wr_pool = spdk_mempool_create("test_wr_pool", 128,
				  sizeof(struct spdk_nvmf_rdma_request_data),
				  0, 0);
	MOCK_CLEAR(spdk_mempool_get);

	device.attr.device_cap_flags = 0;
	device.map = (void *)0x0;
	g_rdma_mr.lkey = 0xABCD;

	/* Test 1: single SGL READ request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_READ);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> EXECUTING */
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
	/* EXECUTED -> TRANSFERRING_C2H */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(rdma_req->recv == NULL);
	CU_ASSERT(rqpair.sends_to_post.first == &rdma_req->data.wr);
	CU_ASSERT(rqpair.sends_to_post.last == &rdma_req->rsp.wr);
	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &port, &resources);

	/* Test 2: single SGL WRITE request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> TRANSFERRING_H2C */
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
	CU_ASSERT(rqpair.sends_to_post.first == &rdma_req->data.wr);
	CU_ASSERT(rqpair.sends_to_post.last == &rdma_req->data.wr);
	rqpair.sends_to_post.first = rqpair.sends_to_post.last = NULL;
	STAILQ_INIT(&poller.qpairs_pending_send);
	/* READY_TO_EXECUTE -> EXECUTING */
	rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	/* EXECUTED -> COMPLETING */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
	CU_ASSERT(rdma_req->recv == NULL);
	CU_ASSERT(rqpair.sends_to_post.first == &rdma_req->rsp.wr);
	CU_ASSERT(rqpair.sends_to_post.last == &rdma_req->rsp.wr);
	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &port, &resources);

	/* Test 3: WRITE+WRITE ibv_send batching */
	{
		struct spdk_nvmf_rdma_recv *recv1, *recv2;
		struct spdk_nvmf_rdma_request *req1, *req2;
		recv1 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req1 = create_req(&rqpair, recv1);
		recv2 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req2 = create_req(&rqpair, recv2);

		/* WRITE 1: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 1;
		spdk_nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
		/* WRITE 1 is the first in the batching list */
		CU_ASSERT(rqpair.sends_to_post.first == &req1->data.wr);
		CU_ASSERT(rqpair.sends_to_post.last == &req1->data.wr);

		/* WRITE 2: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 2;
		spdk_nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
		/* WRITE 2 is now also in the batching list */
		CU_ASSERT(rqpair.sends_to_post.first->next == &req2->data.wr);
		CU_ASSERT(rqpair.sends_to_post.last == &req2->data.wr);

		/* Send everything */
		rqpair.sends_to_post.first = rqpair.sends_to_post.last = NULL;
		STAILQ_INIT(&poller.qpairs_pending_send);

		/* WRITE 1 completes before WRITE 2 has finished RDMA reading */
		/* WRITE 1: READY_TO_EXECUTE -> EXECUTING */
		req1->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		spdk_nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 1: EXECUTED -> COMPLETING */
		req1->state = RDMA_REQUEST_STATE_EXECUTED;
		spdk_nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
		CU_ASSERT(rqpair.sends_to_post.first == &req1->rsp.wr);
		CU_ASSERT(rqpair.sends_to_post.last == &req1->rsp.wr);
		rqpair.sends_to_post.first = rqpair.sends_to_post.last = NULL;
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 1: COMPLETED -> FREE */
		req1->state = RDMA_REQUEST_STATE_COMPLETED;
		spdk_nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_FREE);

		/* Now WRITE 2 has finished reading and completes */
		/* WRITE 2: READY_TO_EXECUTE -> EXECUTING */
		req2->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		spdk_nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 2: EXECUTED -> COMPLETING */
		req2->state = RDMA_REQUEST_STATE_EXECUTED;
		spdk_nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
		CU_ASSERT(rqpair.sends_to_post.first == &req2->rsp.wr);
		CU_ASSERT(rqpair.sends_to_post.last == &req2->rsp.wr);
		rqpair.sends_to_post.first = rqpair.sends_to_post.last = NULL;
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 2: COMPLETED -> FREE */
		req2->state = RDMA_REQUEST_STATE_COMPLETED;
		spdk_nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_FREE);

		free_recv(recv1);
		free_req(req1);
		free_recv(recv2);
		free_req(req2);
		poller_reset(&poller, &group);
		qpair_reset(&rqpair, &poller, &port, &resources);
	}

	spdk_mempool_free(rtransport.transport.data_buf_pool);
	spdk_mempool_free(rtransport.data_wr_pool);
}

#define TEST_GROUPS_COUNT 5
static void
test_spdk_nvmf_rdma_get_optimal_poll_group(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_transport *transport = &rtransport.transport;
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_transport_poll_group *result;
	uint32_t i;

	rqpair.qpair.transport = transport;
	pthread_mutex_init(&rtransport.lock, NULL);
	TAILQ_INIT(&rtransport.poll_groups);

	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		groups[i] = spdk_nvmf_rdma_poll_group_create(transport);
		CU_ASSERT(groups[i] != NULL);
		rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
		groups[i]->transport = transport;
	}
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[0]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[0]);

	/* Emulate connection of %TEST_GROUPS_COUNT% initiators - each creates 1 admin and 1 io qp */
	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		rqpair.qpair.qid = 0;
		result = spdk_nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i]);

		rqpair.qpair.qid = 1;
		result = spdk_nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
	}
	/* Wrapped around; the admin/io pg pointers point to the first pg again.
	   Destroy all poll groups except the last one. */
	for (i = 0; i < TEST_GROUPS_COUNT - 1; i++) {
		spdk_nvmf_rdma_poll_group_destroy(groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[i + 1]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i + 1]);
	}

	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Check that pointers to the next admin/io poll groups are not changed */
	rqpair.qpair.qid = 0;
	result = spdk_nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	rqpair.qpair.qid = 1;
	result = spdk_nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Remove the last poll group, check that pointers are NULL */
	spdk_nvmf_rdma_poll_group_destroy(groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == NULL);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == NULL);

	/* Request optimal poll group, result must be NULL */
	rqpair.qpair.qid = 0;
	result = spdk_nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	rqpair.qpair.qid = 1;
	result = spdk_nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	pthread_mutex_destroy(&rtransport.lock);
}
#undef TEST_GROUPS_COUNT
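
/*
 * Same SGL parsing paths as above, but with DIF insert/strip enabled: the extended
 * LBA payload (block data plus 8 bytes of metadata per 512-byte block) has to fit
 * in the local buffers, so the parsed WR SGEs are expected to stride by
 * data_bs + md_size and req.dif.elba_length to grow accordingly.
 */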
static void
test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	struct spdk_nvmf_transport_pg_cache_buf buffer;
	struct spdk_nvmf_transport_pg_cache_buf *buffer_ptr;
	const uint32_t data_bs = 512;
	const uint32_t md_size = 8;
	int rc, i;
	void *aligned_buffer;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	STAILQ_INIT(&group.retired_bufs);
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	device.map = NULL;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size, block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 8;
	sgl->keyed.length = data_bs * 4;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 2: simple I/O, one SGL equal to io unit size, io_unit_size is not aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 4;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 5);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == g_rdma_mr.lkey);

	/* 2nd buffer consumed */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == g_rdma_mr.lkey);

	/* Part 3: simple I/O, one SGL equal to io unit size, io_unit_size is equal to block size 512 bytes */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs;
	sgl->keyed.length = data_bs;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == data_bs + md_size);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[0].iov_len == data_bs);
	/* 2nd buffer consumed for metadata */
	CU_ASSERT(rdma_req.req.iov[1].iov_base == (void *)((unsigned long)0x2000));
	CU_ASSERT(rdma_req.req.iov[1].iov_len == md_size);

	/* Part 4: simple I/O, one SGL equal to io unit size, io_unit_size is aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->keyed.length = data_bs * 4;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 5: simple I/O, one SGL equal to 2x io unit size, io_unit_size is aligned with md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 2;
	sgl->keyed.length = data_bs * 4;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}
	for (i = 0; i < 2; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].length == data_bs);
	}

	/* Part 6: simple I/O, one SGL larger than the transport io unit size, io_unit_size is not aligned to md_size,
	   block size 512 */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 6;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 6);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 6);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 7);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);

	for (i = 0; i < 3; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}
	CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
	CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
	CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == g_rdma_mr.lkey);

	/* 2nd IO buffer consumed */
	CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
	CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == g_rdma_mr.lkey);

	CU_ASSERT(rdma_req.data.wr.sg_list[5].addr == 0x2000 + 24 + md_size);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[5].lkey == g_rdma_mr.lkey);

	CU_ASSERT(rdma_req.data.wr.sg_list[6].addr == 0x2000 + 24 + 512 + md_size * 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[6].lkey == g_rdma_mr.lkey);

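	/*
	 * Part 7 below pushes the SGE count past what a single WR can carry
	 * (rqpair.max_send_sge is SPDK_NVMF_MAX_SGL_ENTRIES), so the remaining SGE is
	 * expected to land in a second WR taken from the (mocked) pool and chained
	 * between data.wr and rsp.wr.
	 */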
	/* Part 7: simple I/O, number of SGL entries exceeds the number of entries
	   one WR can hold. Additional WR is chained */
	MOCK_SET(spdk_mempool_get, &data);
	aligned_buffer = (void *)((uintptr_t)((char *)&data + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 16;
	sgl->keyed.length = data_bs * 16;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 16);
	CU_ASSERT(rdma_req.req.data == aligned_buffer);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	/* additional wr from pool */
	CU_ASSERT(rdma_req.data.wr.next == (void *)&data.wr);
	CU_ASSERT(rdma_req.data.wr.next->num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.next->next == &rdma_req.rsp.wr);

	/* Part 8: simple I/O, data with metadata does not fit into 1 io_buffer */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = 516;
	sgl->keyed.length = data_bs * 2;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 2);
	CU_ASSERT(rdma_req.req.iovcnt == 3);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 2);
	CU_ASSERT(rdma_req.req.data == (void *)0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);

	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	/* 2nd IO buffer consumed, offset by 4 bytes because part of the metadata
	   is located at the beginning of that buffer */
	CU_ASSERT(rdma_req.data.wr.sg_list[1].addr == 0x2000 + 4);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].length == 512);
	CU_ASSERT(rdma_req.data.wr.sg_list[1].lkey == g_rdma_mr.lkey);

	/* Test 9: a buffer split over two memory regions */
	MOCK_SET(spdk_mempool_get, (void *)&buffer);
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
			  0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = data_bs * 4;
	sgl->keyed.length = data_bs * 2;
	g_mr_size = data_bs;
	g_mr_next_size = rtransport.transport.opts.io_unit_size;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
	for (i = 0; i < 2; i++) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uint64_t)rdma_req.req.data + i *
				(data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}
	buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
	CU_ASSERT(buffer_ptr == &buffer);
	STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
	CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
	g_mr_size = 0;
	g_mr_next_size = 0;

	/* Test 2: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, &data);
	aligned_buffer = (void *)((uintptr_t)((char *)&data + NVMF_DATA_BUFFER_MASK) &
				  ~NVMF_DATA_BUFFER_MASK);

	/* part 1: 2 segments each with 1 wr. io_unit_size is aligned with data_bs + md_size */
	reset_nvmf_rdma_request(&rdma_req);
	spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
			  SPDK_DIF_TYPE1,
			  SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK, 0, 0, 0, 0, 0);
	rdma_req.req.dif.dif_insert_or_strip = true;
	rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);

	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = data_bs * 4;
		sgl_desc[i].address = 0x4000 + i * data_bs * 4;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == data_bs * 4 * 2);
	CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
	CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4 * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
				(data_bs + md_size));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + data_bs * 4);
	CU_ASSERT(data.wr.num_sge == 4);
	for (i = 0; i < 4; ++i) {
		CU_ASSERT(data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
				(data_bs + md_size));
		CU_ASSERT(data.wr.sg_list[i].length == data_bs);
	}

	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
}

int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvmf", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (!CU_add_test(suite, "test_parse_sgl", test_spdk_nvmf_rdma_request_parse_sgl) ||
	    !CU_add_test(suite, "test_request_process", test_spdk_nvmf_rdma_request_process) ||
	    !CU_add_test(suite, "test_optimal_pg", test_spdk_nvmf_rdma_get_optimal_poll_group) ||
	    !CU_add_test(suite, "test_parse_sgl_with_md", test_spdk_nvmf_rdma_request_parse_sgl_with_md)) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}