/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/nvme.h"
#include "spdk/env.h"
#include "spdk/util.h"

#define MAX_DEVS 64

#define MAX_IOVS 128

#define DATA_PATTERN 0x5A

#define BASE_LBA_START 0x100000

struct dev {
	struct spdk_nvme_ctrlr		*ctrlr;
	char				name[SPDK_NVMF_TRADDR_MAX_LEN + 1];
};

static struct dev devs[MAX_DEVS];
static int num_devs = 0;

#define foreach_dev(iter) \
	for (iter = devs; iter - devs < num_devs; iter++)

static int io_complete_flag = 0;

struct sgl_element {
	void *base;
	size_t offset;
	size_t len;
};

struct io_request {
	uint32_t current_iov_index;
	uint32_t current_iov_bytes_left;
	struct sgl_element iovs[MAX_IOVS];
	uint32_t nseg;
	uint32_t misalign;
};

/* Position the SGL cursor at the segment containing byte offset sgl_offset. */
static void nvme_request_reset_sgl(void *cb_arg, uint32_t sgl_offset)
{
	uint32_t i;
	uint32_t offset = 0;
	struct sgl_element *iov;
	struct io_request *req = (struct io_request *)cb_arg;

	for (i = 0; i < req->nseg; i++) {
		iov = &req->iovs[i];
		offset += iov->len;
		if (offset > sgl_offset) {
			break;
		}
	}
	req->current_iov_index = i;
	req->current_iov_bytes_left = offset - sgl_offset;
	return;
}

/* Return the current SGL segment (resuming mid-segment after a reset) and advance. */
static int nvme_request_next_sge(void *cb_arg, void **address, uint32_t *length)
{
	struct io_request *req = (struct io_request *)cb_arg;
	struct sgl_element *iov;

	if (req->current_iov_index >= req->nseg) {
		*length = 0;
		*address = NULL;
		return 0;
	}

	iov = &req->iovs[req->current_iov_index];

	if (req->current_iov_bytes_left) {
		*address = iov->base + iov->offset + iov->len - req->current_iov_bytes_left;
		*length = req->current_iov_bytes_left;
		req->current_iov_bytes_left = 0;
	} else {
		*address = iov->base + iov->offset;
		*length = iov->len;
	}

	req->current_iov_index++;

	return 0;
}
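/*
 * Completion callback shared by all I/O in this test. The polled loops below
 * watch io_complete_flag: 1 indicates a successful completion, 2 indicates a
 * completion with error status.
 */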
static void
io_complete(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	if (spdk_nvme_cpl_is_error(cpl)) {
		io_complete_flag = 2;
	} else {
		io_complete_flag = 1;
	}
}

static void build_io_request_0(struct io_request *req)
{
	req->nseg = 1;

	req->iovs[0].base = spdk_zmalloc(0x800, 4, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[0].len = 0x800;
}

static void build_io_request_1(struct io_request *req)
{
	req->nseg = 1;

	/* 512B for 1st sge */
	req->iovs[0].base = spdk_zmalloc(0x200, 0x200, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[0].len = 0x200;
}

static void build_io_request_2(struct io_request *req)
{
	req->nseg = 1;

	/* 256KB for 1st sge */
	req->iovs[0].base = spdk_zmalloc(0x40000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[0].len = 0x40000;
}

static void build_io_request_3(struct io_request *req)
{
	req->nseg = 3;

	/* 2KB for 1st sge; make sure the iov address starts on a 0x800 boundary
	 * and ends on a 0x1000 boundary */
	req->iovs[0].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[0].offset = 0x800;
	req->iovs[0].len = 0x800;

	/* 4KB for 2nd sge */
	req->iovs[1].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[1].len = 0x1000;

	/* 12KB for 3rd sge */
	req->iovs[2].base = spdk_zmalloc(0x3000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[2].len = 0x3000;
}

static void build_io_request_4(struct io_request *req)
{
	uint32_t i;

	req->nseg = 32;

	/* 4KB for 1st sge */
	req->iovs[0].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[0].len = 0x1000;

	/* 8KB for each of the remaining 31 sges */
	for (i = 1; i < req->nseg; i++) {
		req->iovs[i].base = spdk_zmalloc(0x2000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
		req->iovs[i].len = 0x2000;
	}
}

static void build_io_request_5(struct io_request *req)
{
	req->nseg = 1;

	/* 8KB for 1st sge */
	req->iovs[0].base = spdk_zmalloc(0x2000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[0].len = 0x2000;
}

static void build_io_request_6(struct io_request *req)
{
	req->nseg = 2;

	/* 4KB for 1st sge */
	req->iovs[0].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[0].len = 0x1000;

	/* 4KB for 2nd sge */
	req->iovs[1].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[1].len = 0x1000;
}

static void build_io_request_7(struct io_request *req)
{
	uint8_t *base;

	req->nseg = 1;

	/*
	 * Create a 64KB sge, but ensure it is *not* aligned on a 4KB
	 * boundary. This is valid for single element buffers with PRP.
	 */
	base = spdk_zmalloc(0x11000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->misalign = 64;
	req->iovs[0].base = base + req->misalign;
	req->iovs[0].len = 0x10000;
}

static void build_io_request_8(struct io_request *req)
{
	req->nseg = 2;

	/*
	 * 1KB for 1st sge; make sure the iov address does not start or end
	 * on a 0x1000 boundary
	 */
	req->iovs[0].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[0].offset = 0x400;
	req->iovs[0].len = 0x400;

	/*
	 * 1KB for 2nd sge; make sure the iov address does not start or end
	 * on a 0x1000 boundary
	 */
	req->iovs[1].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[1].offset = 0x400;
	req->iovs[1].len = 0x400;
}

static void build_io_request_9(struct io_request *req)
{
	/*
	 * Check that mixed PRP-compliant and non-compliant requests are handled
	 * properly by splitting them into subrequests.
	 * Construct buffers with the following layout:
	 */
	const size_t req_len[] = { 2048, 4096, 2048, 4096, 2048, 1024 };
	const size_t req_off[] = { 0x800, 0x0, 0x0, 0x100, 0x800, 0x800 };
	struct sgl_element *iovs = req->iovs;
	uint32_t i;
	req->nseg = SPDK_COUNTOF(req_len);
	assert(SPDK_COUNTOF(req_len) == SPDK_COUNTOF(req_off));

	for (i = 0; i < req->nseg; i++) {
		iovs[i].base = spdk_zmalloc(req_off[i] + req_len[i], 0x4000, NULL, SPDK_ENV_LCORE_ID_ANY,
					    SPDK_MALLOC_DMA);
		iovs[i].offset = req_off[i];
		iovs[i].len = req_len[i];
	}
}

static void build_io_request_10(struct io_request *req)
{
	/*
	 * Test the case where we have a valid PRP list, but the first and last
	 * elements are not exact multiples of the logical block size.
	 */
	const size_t req_len[] = { 4004, 4096, 92 };
	const size_t req_off[] = { 0x5c, 0x0, 0x0 };
	struct sgl_element *iovs = req->iovs;
	uint32_t i;
	req->nseg = SPDK_COUNTOF(req_len);
	assert(SPDK_COUNTOF(req_len) == SPDK_COUNTOF(req_off));

	for (i = 0; i < req->nseg; i++) {
		iovs[i].base = spdk_zmalloc(req_off[i] + req_len[i], 0x4000, NULL, SPDK_ENV_LCORE_ID_ANY,
					    SPDK_MALLOC_DMA);
		iovs[i].offset = req_off[i];
		iovs[i].len = req_len[i];
	}
}

static void build_io_request_11(struct io_request *req)
{
	/* This test case focuses on the last element not starting on a page boundary. */
	const size_t req_len[] = { 512, 512 };
	const size_t req_off[] = { 0xe00, 0x800 };
	struct sgl_element *iovs = req->iovs;
	uint32_t i;
	req->nseg = SPDK_COUNTOF(req_len);
	assert(SPDK_COUNTOF(req_len) == SPDK_COUNTOF(req_off));

	for (i = 0; i < req->nseg; i++) {
		iovs[i].base = spdk_zmalloc(req_off[i] + req_len[i], 0x4000, NULL, SPDK_ENV_LCORE_ID_ANY,
					    SPDK_MALLOC_DMA);
		iovs[i].offset = req_off[i];
		iovs[i].len = req_len[i];
	}
}

typedef void (*nvme_build_io_req_fn_t)(struct io_request *req);

static void
free_req(struct io_request *req)
{
	uint32_t i;

	if (req == NULL) {
		return;
	}

	for (i = 0; i < req->nseg; i++) {
		spdk_free(req->iovs[i].base - req->misalign);
	}

	spdk_free(req);
}

static int
writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn, const char *test_name)
{
	int rc = 0;
	uint32_t len, lba_count;
	uint32_t i, j, nseg, remainder;
	char *buf;

	struct io_request *req;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_qpair *qpair;
	const struct spdk_nvme_ns_data *nsdata;

	ns = spdk_nvme_ctrlr_get_ns(dev->ctrlr, 1);
	if (!ns) {
		fprintf(stderr, "Null namespace\n");
		return 0;
	}
	nsdata = spdk_nvme_ns_get_data(ns);
	if (!nsdata || !spdk_nvme_ns_get_sector_size(ns)) {
		fprintf(stderr, "Empty nsdata or wrong sector size\n");
		return 0;
	}

	if (spdk_nvme_ns_get_flags(ns) & SPDK_NVME_NS_DPS_PI_SUPPORTED) {
		return 0;
	}

	req = spdk_zmalloc(sizeof(*req), 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (!req) {
		fprintf(stderr, "Failed to allocate request\n");
		return 0;
	}

	/* Set up the I/O parameters */
	build_io_fn(req);

	len = 0;
	for (i = 0; i < req->nseg; i++) {
		struct sgl_element *sge = &req->iovs[i];

		len += sge->len;
	}

	lba_count = len / spdk_nvme_ns_get_sector_size(ns);
	remainder = len % spdk_nvme_ns_get_sector_size(ns);
	if (!lba_count || remainder || (BASE_LBA_START + lba_count > nsdata->nsze)) {
		fprintf(stderr, "%s: %s invalid I/O length parameter\n", dev->name, test_name);
		free_req(req);
		return 0;
	}

	qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, NULL, 0);
	if (!qpair) {
		free_req(req);
		return -1;
	}

	nseg = req->nseg;
	for (i = 0; i < nseg; i++) {
		memset(req->iovs[i].base + req->iovs[i].offset, DATA_PATTERN, req->iovs[i].len);
	}

	rc = spdk_nvme_ns_cmd_writev(ns, qpair, BASE_LBA_START, lba_count,
				     io_complete, req, 0,
				     nvme_request_reset_sgl,
				     nvme_request_next_sge);

	if (rc != 0) {
		fprintf(stderr, "%s: %s writev failed\n", dev->name, test_name);
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		free_req(req);
		return -1;
	}

	io_complete_flag = 0;

	while (!io_complete_flag) {
		spdk_nvme_qpair_process_completions(qpair, 1);
	}

	if (io_complete_flag != 1) {
		fprintf(stderr, "%s: %s writev failed\n", dev->name, test_name);
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		free_req(req);
		return -1;
	}

	/* reset completion flag */
	io_complete_flag = 0;

	for (i = 0; i < nseg; i++) {
		memset(req->iovs[i].base + req->iovs[i].offset, 0, req->iovs[i].len);
	}

	rc = spdk_nvme_ns_cmd_readv(ns, qpair, BASE_LBA_START, lba_count,
				    io_complete, req, 0,
				    nvme_request_reset_sgl,
				    nvme_request_next_sge);

	if (rc != 0) {
fprintf(stderr, "%s: %s readv failed\n", dev->name, test_name); 433 spdk_nvme_ctrlr_free_io_qpair(qpair); 434 free_req(req); 435 return -1; 436 } 437 438 while (!io_complete_flag) { 439 spdk_nvme_qpair_process_completions(qpair, 1); 440 } 441 442 if (io_complete_flag != 1) { 443 fprintf(stderr, "%s: %s readv failed\n", dev->name, test_name); 444 spdk_nvme_ctrlr_free_io_qpair(qpair); 445 free_req(req); 446 return -1; 447 } 448 449 for (i = 0; i < nseg; i++) { 450 buf = (char *)req->iovs[i].base + req->iovs[i].offset; 451 for (j = 0; j < req->iovs[i].len; j++) { 452 if (buf[j] != DATA_PATTERN) { 453 fprintf(stderr, "%s: %s write/read success, but memcmp Failed\n", dev->name, test_name); 454 spdk_nvme_ctrlr_free_io_qpair(qpair); 455 free_req(req); 456 return -1; 457 } 458 } 459 } 460 461 fprintf(stdout, "%s: %s test passed\n", dev->name, test_name); 462 spdk_nvme_ctrlr_free_io_qpair(qpair); 463 free_req(req); 464 return rc; 465 } 466 467 static bool 468 probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid, 469 struct spdk_nvme_ctrlr_opts *opts) 470 { 471 printf("Attaching to %s\n", trid->traddr); 472 473 return true; 474 } 475 476 static void 477 attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid, 478 struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts) 479 { 480 struct dev *dev; 481 482 /* add to dev list */ 483 dev = &devs[num_devs++]; 484 485 dev->ctrlr = ctrlr; 486 487 snprintf(dev->name, sizeof(dev->name), "%s", 488 trid->traddr); 489 490 printf("Attached to %s\n", dev->name); 491 } 492 493 int main(int argc, char **argv) 494 { 495 struct dev *iter; 496 int rc; 497 struct spdk_env_opts opts; 498 499 spdk_env_opts_init(&opts); 500 opts.name = "nvme_sgl"; 501 opts.core_mask = "0x1"; 502 opts.shm_id = 0; 503 if (spdk_env_init(&opts) < 0) { 504 fprintf(stderr, "Unable to initialize SPDK env\n"); 505 return 1; 506 } 507 508 printf("NVMe Readv/Writev Request test\n"); 509 510 if (spdk_nvme_probe(NULL, NULL, probe_cb, attach_cb, NULL) != 0) { 511 fprintf(stderr, "nvme_probe() failed\n"); 512 exit(1); 513 } 514 515 rc = 0; 516 foreach_dev(iter) { 517 #define TEST(x) writev_readv_tests(iter, x, #x) 518 if (TEST(build_io_request_0) 519 || TEST(build_io_request_1) 520 || TEST(build_io_request_2) 521 || TEST(build_io_request_3) 522 || TEST(build_io_request_4) 523 || TEST(build_io_request_5) 524 || TEST(build_io_request_6) 525 || TEST(build_io_request_7) 526 || TEST(build_io_request_8) 527 || TEST(build_io_request_9) 528 || TEST(build_io_request_10) 529 || TEST(build_io_request_11)) { 530 #undef TEST 531 rc = 1; 532 printf("%s: failed sgl tests\n", iter->name); 533 } 534 } 535 536 printf("Cleaning up...\n"); 537 538 foreach_dev(iter) { 539 spdk_nvme_detach(iter->ctrlr); 540 } 541 542 return rc; 543 } 544