/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "common/lib/test_sock.c"

#include "nvme/nvme_tcp.c"
#include "common/lib/nvme/common_stubs.h"

SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME);

DEFINE_STUB(nvme_qpair_submit_request,
	    int, (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);

static void
test_nvme_tcp_pdu_set_data_buf(void)
{
	struct nvme_tcp_pdu pdu = {};
	struct iovec iov[NVME_TCP_MAX_SGL_DESCRIPTORS] = {};
	uint32_t data_len;
	uint64_t i;

	/* 1st case: input is a single SGL entry. */
	iov[0].iov_base = (void *)0xDEADBEEF;
	iov[0].iov_len = 4096;

	nvme_tcp_pdu_set_data_buf(&pdu, iov, 1, 1024, 512);

	CU_ASSERT(pdu.data_iovcnt == 1);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 1024);
	CU_ASSERT(pdu.data_iov[0].iov_len == 512);
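
	/*
	 * nvme_tcp_pdu_set_data_buf() maps the window [offset, offset + length)
	 * of the source SGL onto pdu.data_iov, rebasing the first covered entry
	 * and clipping the last, so one payload can be carved into successive
	 * per-PDU data buffers as the calls below do.
	 */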

	/* 2nd case: simulate a payload split across multiple SGL entries. */
	iov[0].iov_base = (void *)0xDEADBEEF;
	iov[0].iov_len = 4096;
	iov[1].iov_base = (void *)0xFEEDBEEF;
	iov[1].iov_len = 512 * 7;
	iov[2].iov_base = (void *)0xF00DF00D;
	iov[2].iov_len = 4096 * 2;

	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 0, 2048);

	CU_ASSERT(pdu.data_iovcnt == 1);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 2048);

	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 2048, 2048 + 512 * 3);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 2048);
	CU_ASSERT(pdu.data_iov[0].iov_len == 2048);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 512 * 3);

	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 4096 + 512 * 3, 512 * 4 + 4096 * 2);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xFEEDBEEF + 512 * 3);
	CU_ASSERT(pdu.data_iov[0].iov_len == 512 * 4);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xF00DF00D);
	CU_ASSERT(pdu.data_iov[1].iov_len == 4096 * 2);

	/* 3rd case: the number of input SGL entries equals the number of PDU SGL
	 * entries.
	 */
	data_len = 0;
	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
		iov[i].iov_base = (void *)(0xDEADBEEF + i);
		iov[i].iov_len = 512 * (i + 1);
		data_len += 512 * (i + 1);
	}

	nvme_tcp_pdu_set_data_buf(&pdu, iov, NVME_TCP_MAX_SGL_DESCRIPTORS, 0, data_len);

	CU_ASSERT(pdu.data_iovcnt == NVME_TCP_MAX_SGL_DESCRIPTORS);
	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
		CU_ASSERT((uint64_t)pdu.data_iov[i].iov_base == 0xDEADBEEF + i);
		CU_ASSERT(pdu.data_iov[i].iov_len == 512 * (i + 1));
	}
}

static void
test_nvme_tcp_build_iovs(void)
{
	const uintptr_t pdu_iov_len = 4096;
	struct nvme_tcp_pdu pdu = {};
	struct iovec iovs[5] = {};
	uint32_t mapped_length = 0;
	int rc;

	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + pdu_iov_len * 2 +
			      SPDK_NVME_TCP_DIGEST_LEN;
	pdu.data_len = pdu_iov_len * 2;
	pdu.padding_len = 0;

	pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
	pdu.data_iov[0].iov_len = pdu_iov_len;
	pdu.data_iov[1].iov_base = (void *)(0xDEADBEEF + pdu_iov_len);
	pdu.data_iov[1].iov_len = pdu_iov_len;
	pdu.data_iovcnt = 2;

	rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
	CU_ASSERT(rc == 4);
	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
	CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
	CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
	CU_ASSERT(iovs[3].iov_base == (void *)pdu.data_digest);
	CU_ASSERT(iovs[3].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
		  pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN);
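
	/*
	 * The capsule built above maps to four iovecs: the header and header
	 * digest are contiguous in pdu.hdr.raw and share iovs[0], the two data
	 * buffers follow, and the data digest comes last, so mapped_length
	 * equals pdu.hdr.common.plen.
	 */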

	/* Add a new data_iov entry, update the pdu iov count and data length */
	pdu.data_iov[2].iov_base = (void *)(0xBAADF00D);
	pdu.data_iov[2].iov_len = 123;
	pdu.data_iovcnt = 3;
	pdu.data_len += 123;
	pdu.hdr.common.plen += 123;

	rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
	CU_ASSERT(rc == 5);
	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
	CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
	CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
	CU_ASSERT(iovs[3].iov_base == (void *)(0xBAADF00D));
	CU_ASSERT(iovs[3].iov_len == 123);
	CU_ASSERT(iovs[4].iov_base == (void *)pdu.data_digest);
	CU_ASSERT(iovs[4].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
		  pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN + 123);
}

struct nvme_tcp_ut_bdev_io {
	struct iovec iovs[NVME_TCP_MAX_SGL_DESCRIPTORS];
	int iovpos;
};

/* Essentially a simplification of bdev_nvme_next_sge and bdev_nvme_reset_sgl */
static void
nvme_tcp_ut_reset_sgl(void *cb_arg, uint32_t offset)
{
	struct nvme_tcp_ut_bdev_io *bio = cb_arg;
	struct iovec *iov;

	for (bio->iovpos = 0; bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
		iov = &bio->iovs[bio->iovpos];
		/* The offset must fall exactly on the start of an SGL entry */
		if (offset == 0) {
			break;
		}

		SPDK_CU_ASSERT_FATAL(offset >= iov->iov_len);
		offset -= iov->iov_len;
	}

	SPDK_CU_ASSERT_FATAL(offset == 0);
	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);
}

static int
nvme_tcp_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
{
	struct nvme_tcp_ut_bdev_io *bio = cb_arg;
	struct iovec *iov;

	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);

	iov = &bio->iovs[bio->iovpos];

	*address = iov->iov_base;
	*length = iov->iov_len;
	bio->iovpos++;

	return 0;
}
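
/*
 * The callbacks above exercise the SGL payload contract that the tests below
 * rely on: reset_sgl_fn positions the iterator at a byte offset into the
 * request payload, then next_sge_fn is expected to be called repeatedly, each
 * call yielding one (address, length) pair, until payload_size bytes have
 * been mapped or the descriptors are exhausted.
 */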

static void
test_nvme_tcp_build_sgl_request(void)
{
	struct nvme_tcp_qpair tqpair;
	struct spdk_nvme_ctrlr ctrlr = {0};
	struct nvme_tcp_req tcp_req = {0};
	struct nvme_request req = {{0}};
	struct nvme_tcp_ut_bdev_io bio;
	uint64_t i;
	int rc;

	ctrlr.max_sges = NVME_TCP_MAX_SGL_DESCRIPTORS;
	tqpair.qpair.ctrlr = &ctrlr;
	tcp_req.req = &req;

	req.payload.reset_sgl_fn = nvme_tcp_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_tcp_ut_next_sge;
	req.payload.contig_or_cb_arg = &bio;
	req.qpair = &tqpair.qpair;

	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
		bio.iovs[i].iov_base = (void *)(0xFEEDB000 + i * 0x1000);
		bio.iovs[i].iov_len = 0;
	}

	/* Test case 1: Single SGL. Expected: PASS */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 0x1000;
	bio.iovs[0].iov_len = 0x1000;
	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(bio.iovpos == 1);
	CU_ASSERT((uint64_t)tcp_req.iov[0].iov_base == (uint64_t)bio.iovs[0].iov_base);
	CU_ASSERT(tcp_req.iov[0].iov_len == bio.iovs[0].iov_len);
	CU_ASSERT(tcp_req.iovcnt == 1);

	/* Test case 2: Multiple SGL. Expected: PASS */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 0x4000;
	for (i = 0; i < 4; i++) {
		bio.iovs[i].iov_len = 0x1000;
	}
	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(bio.iovpos == 4);
	CU_ASSERT(tcp_req.iovcnt == 4);
	for (i = 0; i < 4; i++) {
		CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
		CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
	}

	/* Test case 3: Payload is bigger than SGL. Expected: FAIL */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 0x17000;
	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
		bio.iovs[i].iov_len = 0x1000;
	}
	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
	SPDK_CU_ASSERT_FATAL(rc != 0);
	CU_ASSERT(bio.iovpos == NVME_TCP_MAX_SGL_DESCRIPTORS);
	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
		CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
		CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
	}
}

static void
test_nvme_tcp_pdu_set_data_buf_with_md(void)
{
	struct nvme_tcp_pdu pdu = {};
	struct iovec iovs[7] = {};
	struct spdk_dif_ctx dif_ctx = {};
	int rc;

	pdu.dif_ctx = &dif_ctx;

	rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
			       0, 0, 0, 0, 0);
	CU_ASSERT(rc == 0);

	/* Single iovec case */
	iovs[0].iov_base = (void *)0xDEADBEEF;
	iovs[0].iov_len = 2080;

	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 0, 500);

	CU_ASSERT(dif_ctx.data_offset == 0);
	CU_ASSERT(pdu.data_len == 500);
	CU_ASSERT(pdu.data_iovcnt == 1);
	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 500);

	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 500, 1000);

	CU_ASSERT(dif_ctx.data_offset == 500);
	CU_ASSERT(pdu.data_len == 1000);
	CU_ASSERT(pdu.data_iovcnt == 1);
	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 500));
	CU_ASSERT(pdu.data_iov[0].iov_len == 1016);

	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 1500, 548);

	CU_ASSERT(dif_ctx.data_offset == 1500);
	CU_ASSERT(pdu.data_len == 548);
	CU_ASSERT(pdu.data_iovcnt == 1);
	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 1516));
	CU_ASSERT(pdu.data_iov[0].iov_len == 564);
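
	/*
	 * The expected values above follow from the 520-byte extended block
	 * format (512 data bytes + 8 interleaved metadata bytes): offset and
	 * length are given in data bytes, while iov_base/iov_len are in buffer
	 * bytes, growing by 8 for every data block that completes inside the
	 * window. E.g. 1000 data bytes at data offset 500 cross two block
	 * boundaries, so the buffer length is 1000 + 2 * 8 = 1016.
	 */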

	/* Multiple iovecs case */
	iovs[0].iov_base = (void *)0xDEADBEEF;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = (void *)(0xDEADBEEF + 0x1000);
	iovs[1].iov_len = 256 + 1;
	iovs[2].iov_base = (void *)(0xDEADBEEF + 0x2000);
	iovs[2].iov_len = 4;
	iovs[3].iov_base = (void *)(0xDEADBEEF + 0x3000);
	iovs[3].iov_len = 3 + 123;
	iovs[4].iov_base = (void *)(0xDEADBEEF + 0x4000);
	iovs[4].iov_len = 389 + 6;
	iovs[5].iov_base = (void *)(0xDEADBEEF + 0x5000);
	iovs[5].iov_len = 2 + 512 + 8 + 432;
	iovs[6].iov_base = (void *)(0xDEADBEEF + 0x6000);
	iovs[6].iov_len = 80 + 8;

	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 0, 500);

	CU_ASSERT(dif_ctx.data_offset == 0);
	CU_ASSERT(pdu.data_len == 500);
	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 256);
	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x1000));
	CU_ASSERT(pdu.data_iov[1].iov_len == 244);

	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 500, 1000);

	CU_ASSERT(dif_ctx.data_offset == 500);
	CU_ASSERT(pdu.data_len == 1000);
	CU_ASSERT(pdu.data_iovcnt == 5);
	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x1000 + 244));
	CU_ASSERT(pdu.data_iov[0].iov_len == 13);
	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x2000));
	CU_ASSERT(pdu.data_iov[1].iov_len == 4);
	CU_ASSERT(pdu.data_iov[2].iov_base == (void *)(0xDEADBEEF + 0x3000));
	CU_ASSERT(pdu.data_iov[2].iov_len == 3 + 123);
	CU_ASSERT(pdu.data_iov[3].iov_base == (void *)(0xDEADBEEF + 0x4000));
	CU_ASSERT(pdu.data_iov[3].iov_len == 395);
	CU_ASSERT(pdu.data_iov[4].iov_base == (void *)(0xDEADBEEF + 0x5000));
	CU_ASSERT(pdu.data_iov[4].iov_len == 478);

	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 1500, 548);

	CU_ASSERT(dif_ctx.data_offset == 1500);
	CU_ASSERT(pdu.data_len == 548);
	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x5000 + 478));
	CU_ASSERT(pdu.data_iov[0].iov_len == 476);
	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x6000));
	CU_ASSERT(pdu.data_iov[1].iov_len == 88);
}
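
/*
 * With interleaved metadata, each 520-byte extended block contributes only
 * its 512 data bytes to the wire: nvme_tcp_build_iovs() is expected to emit
 * one iovec per data portion and skip the 8-byte metadata fields, as the
 * asserts below spell out.
 */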

static void
test_nvme_tcp_build_iovs_with_md(void)
{
	struct nvme_tcp_pdu pdu = {};
	struct iovec iovs[11] = {};
	struct spdk_dif_ctx dif_ctx = {};
	uint32_t mapped_length = 0;
	int rc;

	rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
			       0, 0, 0, 0, 0);
	CU_ASSERT(rc == 0);

	pdu.dif_ctx = &dif_ctx;

	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + 512 * 8 +
			      SPDK_NVME_TCP_DIGEST_LEN;
	pdu.data_len = 512 * 8;
	pdu.padding_len = 0;

	pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
	pdu.data_iov[0].iov_len = (512 + 8) * 8;
	pdu.data_iovcnt = 1;

	rc = nvme_tcp_build_iovs(iovs, 11, &pdu, true, true, &mapped_length);
	CU_ASSERT(rc == 10);
	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
	CU_ASSERT(iovs[1].iov_len == 512);
	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + 520));
	CU_ASSERT(iovs[2].iov_len == 512);
	CU_ASSERT(iovs[3].iov_base == (void *)(0xDEADBEEF + 520 * 2));
	CU_ASSERT(iovs[3].iov_len == 512);
	CU_ASSERT(iovs[4].iov_base == (void *)(0xDEADBEEF + 520 * 3));
	CU_ASSERT(iovs[4].iov_len == 512);
	CU_ASSERT(iovs[5].iov_base == (void *)(0xDEADBEEF + 520 * 4));
	CU_ASSERT(iovs[5].iov_len == 512);
	CU_ASSERT(iovs[6].iov_base == (void *)(0xDEADBEEF + 520 * 5));
	CU_ASSERT(iovs[6].iov_len == 512);
	CU_ASSERT(iovs[7].iov_base == (void *)(0xDEADBEEF + 520 * 6));
	CU_ASSERT(iovs[7].iov_len == 512);
	CU_ASSERT(iovs[8].iov_base == (void *)(0xDEADBEEF + 520 * 7));
	CU_ASSERT(iovs[8].iov_len == 512);
	CU_ASSERT(iovs[9].iov_base == (void *)pdu.data_digest);
	CU_ASSERT(iovs[9].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
		  512 * 8 + SPDK_NVME_TCP_DIGEST_LEN);
}

int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvme_tcp", NULL, NULL);
	CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf);
	CU_ADD_TEST(suite, test_nvme_tcp_build_iovs);
	CU_ADD_TEST(suite, test_nvme_tcp_build_sgl_request);
	CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf_with_md);
	CU_ADD_TEST(suite, test_nvme_tcp_build_iovs_with_md);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}