/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "nvmf/rdma.c"

uint64_t g_mr_size;
uint64_t g_mr_next_size;
struct ibv_mr g_rdma_mr;

#define RDMA_UT_UNITS_IN_MAX_IO 16

struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
	.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
	.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
	.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
	.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
	.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
	.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
	.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
};

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size, uint64_t translation), 0);
DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size), 0);
DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
		nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type, uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
		uint32_t size, uint64_t object_id, uint64_t arg1));

DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(spdk_nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
		struct spdk_dif_ctx *dif_ctx), false);

void
spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
			       struct spdk_nvmf_transport_poll_group *group,
			       struct spdk_nvmf_transport *transport,
			       uint32_t num_buffers)
{
	uint32_t i;

	for (i = 0; i < num_buffers; i++) {
		if (group->buf_cache_count < group->buf_cache_size) {
			STAILQ_INSERT_HEAD(&group->buf_cache,
					   (struct spdk_nvmf_transport_pg_cache_buf *)req->buffers[i],
					   link);
			group->buf_cache_count++;
		} else {
			spdk_mempool_put(transport->data_buf_pool, req->buffers[i]);
		}
		req->iov[i].iov_base = NULL;
		req->buffers[i] = NULL;
		req->iov[i].iov_len = 0;
	}
	req->data_from_pool = false;
}

int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t num_buffers)
{
	uint32_t i = 0;

	while (i < num_buffers) {
		if (!(STAILQ_EMPTY(&group->buf_cache))) {
			group->buf_cache_count--;
			req->buffers[i] = STAILQ_FIRST(&group->buf_cache);
			STAILQ_REMOVE_HEAD(&group->buf_cache, link);
			i++;
		} else {
			if (spdk_mempool_get_bulk(transport->data_buf_pool, &req->buffers[i],
						  num_buffers - i)) {
				goto err_exit;
			}
			i += num_buffers - i;
		}
	}

	return 0;

err_exit:
	spdk_nvmf_request_free_buffers(req, group, transport, i);
	return -ENOMEM;
}

uint64_t
spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
{
	if (g_mr_size != 0) {
		*size = g_mr_size;
		if (g_mr_next_size != 0) {
			g_mr_size = g_mr_next_size;
		}
	}

	return (uint64_t)&g_rdma_mr;
}

static void reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
{
	int i;

	rdma_req->req.length = 0;
	rdma_req->req.data_from_pool = false;
	rdma_req->req.data = NULL;
	rdma_req->data.wr.num_sge = 0;
	rdma_req->data.wr.wr.rdma.remote_addr = 0;
	rdma_req->data.wr.wr.rdma.rkey = 0;
	rdma_req->elba_length = 0;
	rdma_req->orig_length = 0;
	rdma_req->dif_insert_or_strip = false;

	for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
		rdma_req->req.iov[i].iov_base = 0;
		rdma_req->req.iov[i].iov_len = 0;
		rdma_req->req.buffers[i] = 0;
		rdma_req->data.wr.sg_list[i].addr = 0;
		rdma_req->data.wr.sg_list[i].length = 0;
		rdma_req->data.wr.sg_list[i].lkey = 0;
	}
}

static void
test_spdk_nvmf_rdma_request_parse_sgl(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req = {};
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvmf_transport_pg_cache_buf bufs[4];
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	struct spdk_nvmf_transport_pg_cache_buf buffer;
	struct spdk_nvmf_transport_pg_cache_buf *buffer_ptr;
	int rc, i;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	group.group.transport = &rtransport.transport;
	STAILQ_INIT(&group.retired_bufs);
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;

	device.map = (void *)0x0;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	/* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 3: simple I/O one SGL larger than the transport max io size */
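	/* The keyed SGL length below is twice the transport max_io_size, so the
	 * parse call is expected to reject the request (rc == -1).
	 */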
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 4: Pretend there are no buffer pools */
	MOCK_SET(spdk_mempool_get, NULL);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == false);
	CU_ASSERT(rdma_req.req.data == NULL);
	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
	CU_ASSERT(rdma_req.req.buffers[0] == NULL);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);

	rdma_req.recv->buf = (void *)0xDDDD;
	/* Test 2: sgl type: data block subtype: offset (in capsule data) */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;

	/* Part 1: Normal I/O, SGL length equal to the in-capsule data size, no offset */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
	CU_ASSERT(rdma_req.req.data_from_pool == false);

	/* Part 2: I/O offset + length too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = rtransport.transport.opts.in_capsule_data_size;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 3: I/O too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Test 3: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, &data);

	/* part 1: 2 segments, each with 1 wr */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
		sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 2: 2 segments, each with 1 wr containing 8 sge_elements */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
		sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 16);
	CU_ASSERT(rdma_req.data.wr.num_sge == 8);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
	CU_ASSERT(data.wr.num_sge == 8);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 3: 2 segments, one very large, one very small */
	reset_nvmf_rdma_request(&rdma_req);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.key = 0x44;
	}

	sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
				   rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[0].address = 0x4000;
	sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
			      rtransport.transport.opts.io_unit_size / 2;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 17);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	for (i = 0; i < 15; i++) {
		CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
	}
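	/* The remaining half io_unit of the first segment lands in the 16th SGE of the
	 * first WR; the second segment is carried by the chained WR with a single SGE.
	 */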
	CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
		  rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* Test 4: use PG buffer cache */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;

	for (i = 0; i < 4; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}

	/* part 1: use the four buffers from the pg cache */
	group.group.buf_cache_size = 4;
	group.group.buf_cache_count = 4;
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * 4;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	/* part 2: now that we have used the buffers from the cache, try again. We should get mempool buffers. */
	reset_nvmf_rdma_request(&rdma_req);
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(group.group.buf_cache_count == 0);
	}

	/* part 3: half and half */
	group.group.buf_cache_count = 2;

	for (i = 0; i < 2; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}
	reset_nvmf_rdma_request(&rdma_req);
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	for (i = 0; i < 2; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}
	for (i = 2; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	reset_nvmf_rdma_request(&rdma_req);
	/* Test 5: a buffer split over two Memory Regions */
	MOCK_SET(spdk_mempool_get, (void *)&buffer);
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;
	g_mr_size = rtransport.transport.opts.io_unit_size / 4;
	g_mr_next_size = rtransport.transport.opts.io_unit_size / 2;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
	buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
	CU_ASSERT(buffer_ptr == &buffer);
	STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
	CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));

	reset_nvmf_rdma_request(&rdma_req);
}

static struct spdk_nvmf_rdma_recv *
create_recv(struct spdk_nvmf_rdma_qpair *rqpair, enum spdk_nvme_nvm_opcode opc)
{
	struct spdk_nvmf_rdma_recv *rdma_recv;
	union nvmf_h2c_msg *cmd;
	struct spdk_nvme_sgl_descriptor *sgl;

	rdma_recv = calloc(1, sizeof(*rdma_recv));
	rdma_recv->qpair = rqpair;
	cmd = calloc(1, sizeof(*cmd));
	rdma_recv->sgl[0].addr = (uintptr_t)cmd;
	cmd->nvme_cmd.opc = opc;
	sgl = &cmd->nvme_cmd.dptr.sgl1;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->keyed.length = 1;

	return rdma_recv;
}

static void
free_recv(struct spdk_nvmf_rdma_recv *rdma_recv)
{
	free((void *)rdma_recv->sgl[0].addr);
	free(rdma_recv);
}

static struct spdk_nvmf_rdma_request *
create_req(struct spdk_nvmf_rdma_qpair *rqpair,
	   struct spdk_nvmf_rdma_recv *rdma_recv)
{
	struct spdk_nvmf_rdma_request *rdma_req;
	union nvmf_c2h_msg *cpl;

	rdma_req = calloc(1, sizeof(*rdma_req));
	rdma_req->recv = rdma_recv;
	rdma_req->req.qpair = &rqpair->qpair;
	rdma_req->state = RDMA_REQUEST_STATE_NEW;
	rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
	rdma_req->data.wr.sg_list = rdma_req->data.sgl;
	cpl = calloc(1, sizeof(*cpl));
	rdma_req->rsp.sgl[0].addr = (uintptr_t)cpl;
	rdma_req->req.rsp = cpl;

	return rdma_req;
}

static void
free_req(struct spdk_nvmf_rdma_request *rdma_req)
{
	free((void *)rdma_req->rsp.sgl[0].addr);
	free(rdma_req);
}

static void
qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
	    struct spdk_nvmf_rdma_poller *poller,
	    struct spdk_nvmf_rdma_port *port,
	    struct spdk_nvmf_rdma_resources *resources)
{
	memset(rqpair, 0, sizeof(*rqpair));
	STAILQ_INIT(&rqpair->pending_rdma_write_queue);
	STAILQ_INIT(&rqpair->pending_rdma_read_queue);
	rqpair->poller = poller;
	rqpair->port = port;
	rqpair->resources = resources;
	rqpair->qpair.qid = 1;
	rqpair->ibv_state = IBV_QPS_RTS;
	rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
	rqpair->max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
	rqpair->max_send_depth = 16;
	rqpair->max_read_depth = 16;
	resources->recvs_to_post.first = resources->recvs_to_post.last = NULL;
}

static void
poller_reset(struct spdk_nvmf_rdma_poller *poller,
	     struct spdk_nvmf_rdma_poll_group *group)
{
	memset(poller, 0, sizeof(*poller));
	STAILQ_INIT(&poller->qpairs_pending_recv);
	STAILQ_INIT(&poller->qpairs_pending_send);
	poller->group = group;
}

static void
test_spdk_nvmf_rdma_request_process(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_rdma_poll_group group = {};
	struct spdk_nvmf_rdma_poller poller = {};
	struct spdk_nvmf_rdma_port port = {};
	struct spdk_nvmf_rdma_device device = {};
	struct spdk_nvmf_rdma_resources resources = {};
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_rdma_recv *rdma_recv;
	struct spdk_nvmf_rdma_request *rdma_req;
	bool progress;

	STAILQ_INIT(&group.group.buf_cache);
	STAILQ_INIT(&group.group.pending_buf_queue);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	port.device = &device;
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &port, &resources);

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.transport.data_buf_pool = spdk_mempool_create("test_data_pool", 16, 128, 0, 0);
	rtransport.data_wr_pool = spdk_mempool_create("test_wr_pool", 128,
				  sizeof(struct spdk_nvmf_rdma_request_data),
				  0, 0);
	MOCK_CLEAR(spdk_mempool_get);

	device.attr.device_cap_flags = 0;
	device.map = (void *)0x0;
	g_rdma_mr.lkey = 0xABCD;

	/* Test 1: single SGL READ request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_READ);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> EXECUTING */
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
	/* EXECUTED -> TRANSFERRING_C2H */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(rdma_req->recv == NULL);
	CU_ASSERT(rqpair.sends_to_post.first == &rdma_req->data.wr);
	CU_ASSERT(rqpair.sends_to_post.last == &rdma_req->rsp.wr);
	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &port, &resources);

	/* Test 2: single SGL WRITE request */
	rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
	rdma_req = create_req(&rqpair, rdma_recv);
	rqpair.current_recv_depth = 1;
	/* NEW -> TRANSFERRING_H2C */
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
	CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
	CU_ASSERT(rqpair.sends_to_post.first == &rdma_req->data.wr);
	CU_ASSERT(rqpair.sends_to_post.last == &rdma_req->data.wr);
	rqpair.sends_to_post.first = rqpair.sends_to_post.last = NULL;
	STAILQ_INIT(&poller.qpairs_pending_send);
	/* READY_TO_EXECUTE -> EXECUTING */
	rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
	/* EXECUTED -> COMPLETING */
	rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
	CU_ASSERT(rdma_req->recv == NULL);
	CU_ASSERT(rqpair.sends_to_post.first == &rdma_req->rsp.wr);
	CU_ASSERT(rqpair.sends_to_post.last == &rdma_req->rsp.wr);
	CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
	CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
	/* COMPLETED -> FREE */
	rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
	progress = spdk_nvmf_rdma_request_process(&rtransport, rdma_req);
	CU_ASSERT(progress == true);
	CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);

	free_recv(rdma_recv);
	free_req(rdma_req);
	poller_reset(&poller, &group);
	qpair_reset(&rqpair, &poller, &port, &resources);

	/* Test 3: WRITE+WRITE ibv_send batching */
	{
		struct spdk_nvmf_rdma_recv *recv1, *recv2;
		struct spdk_nvmf_rdma_request *req1, *req2;
		recv1 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req1 = create_req(&rqpair, recv1);
		recv2 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
		req2 = create_req(&rqpair, recv2);

		/* WRITE 1: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 1;
		spdk_nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
		/* WRITE 1 is the first in the batching list */
		CU_ASSERT(rqpair.sends_to_post.first == &req1->data.wr);
		CU_ASSERT(rqpair.sends_to_post.last == &req1->data.wr);

		/* WRITE 2: NEW -> TRANSFERRING_H2C */
		rqpair.current_recv_depth = 2;
		spdk_nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
		/* WRITE 2 is now also in the batching list */
		CU_ASSERT(rqpair.sends_to_post.first->next == &req2->data.wr);
		CU_ASSERT(rqpair.sends_to_post.last == &req2->data.wr);

		/* Send everything */
		rqpair.sends_to_post.first = rqpair.sends_to_post.last = NULL;
		STAILQ_INIT(&poller.qpairs_pending_send);

		/* WRITE 1 completes before WRITE 2 has finished RDMA reading */
		/* WRITE 1: READY_TO_EXECUTE -> EXECUTING */
		req1->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		spdk_nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 1: EXECUTED -> COMPLETING */
		req1->state = RDMA_REQUEST_STATE_EXECUTED;
		spdk_nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
		CU_ASSERT(rqpair.sends_to_post.first == &req1->rsp.wr);
		CU_ASSERT(rqpair.sends_to_post.last == &req1->rsp.wr);
		rqpair.sends_to_post.first = rqpair.sends_to_post.last = NULL;
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 1: COMPLETED -> FREE */
		req1->state = RDMA_REQUEST_STATE_COMPLETED;
		spdk_nvmf_rdma_request_process(&rtransport, req1);
		CU_ASSERT(req1->state == RDMA_REQUEST_STATE_FREE);

		/* Now WRITE 2 has finished reading and completes */
		/* WRITE 2: READY_TO_EXECUTE -> EXECUTING */
		req2->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
		spdk_nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_EXECUTING);
		/* WRITE 2: EXECUTED -> COMPLETING */
		req2->state = RDMA_REQUEST_STATE_EXECUTED;
		spdk_nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
		CU_ASSERT(rqpair.sends_to_post.first == &req2->rsp.wr);
		CU_ASSERT(rqpair.sends_to_post.last == &req2->rsp.wr);
		rqpair.sends_to_post.first = rqpair.sends_to_post.last = NULL;
		STAILQ_INIT(&poller.qpairs_pending_send);
		/* WRITE 2: COMPLETED -> FREE */
		req2->state = RDMA_REQUEST_STATE_COMPLETED;
		spdk_nvmf_rdma_request_process(&rtransport, req2);
		CU_ASSERT(req2->state == RDMA_REQUEST_STATE_FREE);

		free_recv(recv1);
		free_req(req1);
		free_recv(recv2);
		free_req(req2);
		poller_reset(&poller, &group);
		qpair_reset(&rqpair, &poller, &port, &resources);
	}

	spdk_mempool_free(rtransport.transport.data_buf_pool);
	spdk_mempool_free(rtransport.data_wr_pool);
}

#define TEST_GROUPS_COUNT 5
static void
test_spdk_nvmf_rdma_get_optimal_poll_group(void)
{
	struct spdk_nvmf_rdma_transport rtransport = {};
	struct spdk_nvmf_transport *transport = &rtransport.transport;
	struct spdk_nvmf_rdma_qpair rqpair = {};
	struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_transport_poll_group *result;
	uint32_t i;

	rqpair.qpair.transport = transport;
	pthread_mutex_init(&rtransport.lock, NULL);
	TAILQ_INIT(&rtransport.poll_groups);

	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		groups[i] = spdk_nvmf_rdma_poll_group_create(transport);
		CU_ASSERT(groups[i] != NULL);
		rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
		groups[i]->transport = transport;
	}
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[0]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[0]);

	/* Emulate connection of %TEST_GROUPS_COUNT% initiators - each creates 1 admin and 1 io qp */
	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		rqpair.qpair.qid = 0;
		result = spdk_nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i]);

		rqpair.qpair.qid = 1;
		result = spdk_nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
		CU_ASSERT(result == groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
	}
	/* Wrap around: admin/io pg point to the first pg.
	   Destroy all poll groups except the last one. */
	for (i = 0; i < TEST_GROUPS_COUNT - 1; i++) {
		spdk_nvmf_rdma_poll_group_destroy(groups[i]);
		CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[i + 1]);
		CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i + 1]);
	}

	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Check that pointers to the next admin/io poll groups are not changed */
	rqpair.qpair.qid = 0;
	result = spdk_nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	rqpair.qpair.qid = 1;
	result = spdk_nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);

	/* Remove the last poll group, check that pointers are NULL */
	spdk_nvmf_rdma_poll_group_destroy(groups[TEST_GROUPS_COUNT - 1]);
	CU_ASSERT(rtransport.conn_sched.next_admin_pg == NULL);
	CU_ASSERT(rtransport.conn_sched.next_io_pg == NULL);

	/* Request optimal poll group, result must be NULL */
	rqpair.qpair.qid = 0;
	result = spdk_nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	rqpair.qpair.qid = 1;
	result = spdk_nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
	CU_ASSERT(result == NULL);

	pthread_mutex_destroy(&rtransport.lock);
}
#undef TEST_GROUPS_COUNT

int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvmf", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (!CU_add_test(suite, "test_parse_sgl", test_spdk_nvmf_rdma_request_parse_sgl) ||
	    !CU_add_test(suite, "test_request_process", test_spdk_nvmf_rdma_request_process) ||
	    !CU_add_test(suite, "test_optimal_pg", test_spdk_nvmf_rdma_get_optimal_poll_group)) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}