/*-
 * BSD LICENSE
 *
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "nvmf/rdma.c"

uint64_t g_mr_size;
struct ibv_mr g_rdma_mr;

#define RDMA_UT_UNITS_IN_MAX_IO 16

struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
	.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
	.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
	.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
	.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
	.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
	.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
	.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
};

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size, uint64_t translation), 0);
DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
		uint64_t size), 0);
DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
		nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));

struct spdk_trace_histories *g_trace_histories;
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
		uint16_t tpoint_id, uint8_t owner_type, uint8_t object_type, uint8_t new_object,
		uint8_t arg1_type, const char *arg1_name));
DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
		uint32_t size, uint64_t object_id, uint64_t arg1));

DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(spdk_nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));

/* Stubbed translation: always resolve to the test MR and, when g_mr_size is set,
 * report it as the length of the translated region. */
uint64_t
spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
{
	if (g_mr_size != 0) {
		*size = g_mr_size;
	}

	return (uint64_t)&g_rdma_mr;
}

/* Zero out every request field that the assertions below inspect. */
static void reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
{
	int i;

	rdma_req->req.length = 0;
	rdma_req->data_from_pool = false;
	rdma_req->req.data = NULL;
	rdma_req->data.wr.num_sge = 0;
	rdma_req->data.wr.wr.rdma.remote_addr = 0;
	rdma_req->data.wr.wr.rdma.rkey = 0;

	for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
		rdma_req->req.iov[i].iov_base = 0;
		rdma_req->req.iov[i].iov_len = 0;
		rdma_req->buffers[i] = 0;
		rdma_req->data.wr.sg_list[i].addr = 0;
		rdma_req->data.wr.sg_list[i].length = 0;
		rdma_req->data.wr.sg_list[i].lkey = 0;
	}
}

/*
 * Exercise spdk_nvmf_rdma_request_parse_sgl() with keyed SGLs, in-capsule
 * (offset) SGLs, multi-descriptor SGLs, and the poll group buffer cache.
 */
static void
test_spdk_nvmf_rdma_request_parse_sgl(void)
{
	struct spdk_nvmf_rdma_transport rtransport;
	struct spdk_nvmf_rdma_device device;
	struct spdk_nvmf_rdma_request rdma_req;
	struct spdk_nvmf_rdma_recv recv;
	struct spdk_nvmf_rdma_poll_group group;
	struct spdk_nvmf_rdma_qpair rqpair;
	struct spdk_nvmf_rdma_poller poller;
	union nvmf_c2h_msg cpl;
	union nvmf_h2c_msg cmd;
	struct spdk_nvme_sgl_descriptor *sgl;
	struct spdk_nvmf_transport_pg_cache_buf bufs[4];
	struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
	struct spdk_nvmf_rdma_request_data data;
	int rc, i;

	data.wr.sg_list = data.sgl;
	STAILQ_INIT(&group.group.buf_cache);
	group.group.buf_cache_size = 0;
	group.group.buf_cache_count = 0;
	poller.group = &group;
	rqpair.poller = &poller;
	rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;

	sgl = &cmd.nvme_cmd.dptr.sgl1;
	rdma_req.recv = &recv;
	rdma_req.req.cmd = &cmd;
	rdma_req.req.rsp = &cpl;
	rdma_req.data.wr.sg_list = rdma_req.data.sgl;
	rdma_req.req.qpair = &rqpair.qpair;
	rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;

	rtransport.transport.opts = g_rdma_ut_transport_opts;
	rtransport.data_wr_pool = NULL;
	rtransport.transport.data_buf_pool = NULL;

	device.attr.device_cap_flags = 0;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;

	/* Test 1: sgl type: keyed data block subtype: address */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;

	/* Part 1: simple I/O, one SGL smaller than the transport io unit size */
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;

	device.map = (void *)0x0;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT((uint64_t)rdma_req.buffers[0] == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);

	/* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
		CU_ASSERT((uint64_t)rdma_req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
	}

	/* Part 3: simple I/O, one SGL larger than the transport max io size */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 4: Pretend there are no buffer pools */
	MOCK_SET(spdk_mempool_get, NULL);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.data_from_pool == false);
	CU_ASSERT(rdma_req.req.data == NULL);
	CU_ASSERT(rdma_req.data.wr.num_sge == 0);
	CU_ASSERT(rdma_req.buffers[0] == NULL);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
	CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);

	rdma_req.recv->buf = (void *)0xDDDD;
	/* Test 2: sgl type: data block subtype: offset (in capsule data) */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;

	/* Part 1: Normal I/O that fits within the in capsule data size, no offset */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
	CU_ASSERT(rdma_req.data_from_pool == false);

	/* Part 2: I/O offset + length too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = rtransport.transport.opts.in_capsule_data_size;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Part 3: I/O too large */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->address = 0;
	sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == -1);

	/* Test 3: Multi SGL */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	sgl->address = 0;
	rdma_req.recv->buf = (void *)&sgl_desc;
	MOCK_SET(spdk_mempool_get, &data);

	/* part 1: 2 segments each with 1 wr. */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
		sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
	CU_ASSERT(rdma_req.data.wr.num_sge == 1);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 2: 2 segments, each with 1 wr containing 8 sge elements */
	reset_nvmf_rdma_request(&rdma_req);
	sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
		sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
		sgl_desc[i].keyed.key = 0x44;
	}

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 16);
	CU_ASSERT(rdma_req.data.wr.num_sge == 8);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
	CU_ASSERT(data.wr.num_sge == 8);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* part 3: 2 segments, one very large, one very small */
	reset_nvmf_rdma_request(&rdma_req);
	for (i = 0; i < 2; i++) {
		sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		sgl_desc[i].keyed.key = 0x44;
	}

	sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
				   rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[0].address = 0x4000;
	sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
	sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
			      rtransport.transport.opts.io_unit_size / 2;

	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	CU_ASSERT(rc == 0);
	CU_ASSERT(rdma_req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
	CU_ASSERT(rdma_req.req.iovcnt == 17);
	CU_ASSERT(rdma_req.data.wr.num_sge == 16);
	for (i = 0; i < 15; i++) {
		CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
	}
	CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
	CU_ASSERT(rdma_req.data.wr.next == &data.wr);
	CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
	CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
		  rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
	CU_ASSERT(data.wr.num_sge == 1);
	CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);

	/* Test 4: use PG buffer cache */
	sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	sgl->address = 0xFFFF;
	rdma_req.recv->buf = (void *)0xDDDD;
	g_rdma_mr.lkey = 0xABCD;
	sgl->keyed.key = 0xEEEE;

	for (i = 0; i < 4; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}

	/* part 1: use the four buffers from the pg cache */
	group.group.buf_cache_size = 4;
	group.group.buf_cache_count = 4;
	MOCK_SET(spdk_mempool_get, (void *)0x2000);
	reset_nvmf_rdma_request(&rdma_req);
	sgl->keyed.length = rtransport.transport.opts.io_unit_size * 4;
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}

	/* part 2: now that we have used the buffers from the cache, try again. We should get mempool buffers. */
	reset_nvmf_rdma_request(&rdma_req);
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
	for (i = 0; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
		CU_ASSERT(group.group.buf_cache_count == 0);
	}

	/* part 3: half and half */
	group.group.buf_cache_count = 2;

	for (i = 0; i < 2; i++) {
		STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
	}
	reset_nvmf_rdma_request(&rdma_req);
	rc = spdk_nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);

	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(rdma_req.data_from_pool == true);
	CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
	CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
			~NVMF_DATA_BUFFER_MASK));
	CU_ASSERT(rdma_req.data.wr.num_sge == 4);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
	CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
	CU_ASSERT(group.group.buf_cache_count == 0);
	for (i = 0; i < 2; i++) {
		CU_ASSERT((uint64_t)rdma_req.buffers[i] == (uint64_t)&bufs[i]);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
				~NVMF_DATA_BUFFER_MASK));
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}
	for (i = 2; i < 4; i++) {
		CU_ASSERT((uint64_t)rdma_req.buffers[i] == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
		CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
	}
}

int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvmf", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (CU_add_test(suite, "test_parse_sgl", test_spdk_nvmf_rdma_request_parse_sgl) == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}