/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2015 Intel Corporation.
 *   All rights reserved.
 */

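/*
 * Functional test for the NVMe readv/writev path: each build_io_request_*()
 * constructs a scatter-gather list with a different size/alignment pattern,
 * which is then written to and read back from every attached controller.
 */
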
#include "spdk/stdinc.h"

#include "spdk/nvme.h"
#include "spdk/env.h"
#include "spdk/util.h"

#define MAX_DEVS 64

#define MAX_IOVS 128

#define DATA_PATTERN 0x5A

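/* Starting LBA used by all test I/O */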
#define BASE_LBA_START 0x100000

struct dev {
	struct spdk_nvme_ctrlr			*ctrlr;
	char					name[SPDK_NVMF_TRADDR_MAX_LEN + 1];
};

static struct dev devs[MAX_DEVS];
static int num_devs = 0;

#define foreach_dev(iter) \
	for (iter = devs; iter - devs < num_devs; iter++)

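/* 0: I/O outstanding, 1: completed successfully, 2: completed with error */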
static int io_complete_flag = 0;

struct sgl_element {
	void *base;
	size_t offset;
	size_t len;
};

struct io_request {
	uint32_t current_iov_index;
	uint32_t current_iov_bytes_left;
	struct sgl_element iovs[MAX_IOVS];
	uint32_t nseg;
	uint32_t misalign;
};

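/*
 * SGL reset callback: position the request's iov cursor so that the next
 * call to nvme_request_next_sge() resumes sgl_offset bytes into the list.
 */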
static void
nvme_request_reset_sgl(void *cb_arg, uint32_t sgl_offset)
{
	uint32_t i;
	uint32_t offset = 0;
	struct sgl_element *iov;
	struct io_request *req = (struct io_request *)cb_arg;

	for (i = 0; i < req->nseg; i++) {
		iov = &req->iovs[i];
		offset += iov->len;
		if (offset > sgl_offset) {
			break;
		}
	}
	req->current_iov_index = i;
	req->current_iov_bytes_left = offset - sgl_offset;
}

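/*
 * SGL next-sge callback: hand back the current segment (or its unconsumed
 * tail after a reset) and advance the cursor to the next segment.
 */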
static int
nvme_request_next_sge(void *cb_arg, void **address, uint32_t *length)
{
	struct io_request *req = (struct io_request *)cb_arg;
	struct sgl_element *iov;

	if (req->current_iov_index >= req->nseg) {
		*length = 0;
		*address = NULL;
		return 0;
	}

	iov = &req->iovs[req->current_iov_index];

	if (req->current_iov_bytes_left) {
		/* Resume partway through this segment after a reset. */
		*address = (uint8_t *)iov->base + iov->offset + iov->len - req->current_iov_bytes_left;
		*length = req->current_iov_bytes_left;
		req->current_iov_bytes_left = 0;
	} else {
		*address = (uint8_t *)iov->base + iov->offset;
		*length = iov->len;
	}

	req->current_iov_index++;

	return 0;
}

static void
io_complete(void *ctx, const struct spdk_nvme_cpl *cpl)
{
	if (spdk_nvme_cpl_is_error(cpl)) {
		io_complete_flag = 2;
	} else {
		io_complete_flag = 1;
	}
}

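/* Single 2KB sge with only 4-byte alignment */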
static void
build_io_request_0(struct io_request *req)
{
	req->nseg = 1;

	req->iovs[0].base = spdk_zmalloc(0x800, 4, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[0].len = 0x800;
}

static void
build_io_request_1(struct io_request *req)
{
	req->nseg = 1;

	/* 512B for 1st sge */
	req->iovs[0].base = spdk_zmalloc(0x200, 0x200, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[0].len = 0x200;
}

static void
build_io_request_2(struct io_request *req)
{
	req->nseg = 1;

	/* 256KB for 1st sge */
	req->iovs[0].base = spdk_zmalloc(0x40000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[0].len = 0x40000;
}

static void
build_io_request_3(struct io_request *req)
{
	req->nseg = 3;

	/* 2KB for 1st sge; make sure the iov starts on a 0x800 boundary
	 *  (not 0x1000-aligned) and ends on a 0x1000 boundary */
	req->iovs[0].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[0].offset = 0x800;
	req->iovs[0].len = 0x800;

	/* 4KB for 2nd sge */
	req->iovs[1].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[1].len = 0x1000;

	/* 12KB for 3rd sge */
	req->iovs[2].base = spdk_zmalloc(0x3000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[2].len = 0x3000;
}

static void
build_io_request_4(struct io_request *req)
{
	uint32_t i;

	req->nseg = 32;

	/* 4KB for 1st sge */
	req->iovs[0].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[0].len = 0x1000;

	/* 8KB for each of the remaining 31 sges */
	for (i = 1; i < req->nseg; i++) {
		req->iovs[i].base = spdk_zmalloc(0x2000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
		req->iovs[i].len = 0x2000;
	}
}

static void
build_io_request_5(struct io_request *req)
{
	req->nseg = 1;

	/* 8KB for 1st sge */
	req->iovs[0].base = spdk_zmalloc(0x2000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[0].len = 0x2000;
}

static void
build_io_request_6(struct io_request *req)
{
	req->nseg = 2;

	/* 4KB for 1st sge */
	req->iovs[0].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[0].len = 0x1000;

	/* 4KB for 2nd sge */
	req->iovs[1].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[1].len = 0x1000;
}

static void
build_io_request_7(struct io_request *req)
{
	uint8_t *base;

	req->nseg = 1;

	/*
	 * Create a 64KB sge, but ensure it is *not* aligned on a 4KB
	 *  boundary.  This is valid for single-element buffers with PRP.
	 */
	base = spdk_zmalloc(0x11000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->misalign = 64;
	req->iovs[0].base = base + req->misalign;
	req->iovs[0].len = 0x10000;
}

static void
build_io_request_8(struct io_request *req)
{
	req->nseg = 2;

	/*
	 * 1KB for 1st sge; make sure the iov neither starts nor ends
	 * on a 0x1000 boundary
	 */
	req->iovs[0].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[0].offset = 0x400;
	req->iovs[0].len = 0x400;

	/*
	 * 1KB for 2nd sge; make sure the iov neither starts nor ends
	 * on a 0x1000 boundary
	 */
	req->iovs[1].base = spdk_zmalloc(0x1000, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	req->iovs[1].offset = 0x400;
	req->iovs[1].len = 0x400;
}

static void
build_io_request_9(struct io_request *req)
{
	/*
	 * Check that mixed PRP-compliant and non-compliant requests are
	 * handled properly by splitting them into subrequests.
	 * Construct buffers with the following lengths and offsets:
	 */
	const size_t req_len[] = {  2048, 4096, 2048,  4096,  2048,  1024 };
	const size_t req_off[] = { 0x800,  0x0,  0x0, 0x100, 0x800, 0x800 };
	struct sgl_element *iovs = req->iovs;
	uint32_t i;

	req->nseg = SPDK_COUNTOF(req_len);
	assert(SPDK_COUNTOF(req_len) == SPDK_COUNTOF(req_off));

	for (i = 0; i < req->nseg; i++) {
		iovs[i].base = spdk_zmalloc(req_off[i] + req_len[i], 0x4000, NULL, SPDK_ENV_LCORE_ID_ANY,
					    SPDK_MALLOC_DMA);
		iovs[i].offset = req_off[i];
		iovs[i].len = req_len[i];
	}
}

static void
build_io_request_10(struct io_request *req)
{
	/*
	 * Test the case where we have a valid PRP list, but the first and last
	 * elements are not exact multiples of the logical block size.
	 */
	const size_t req_len[] = {  4004, 4096,  92 };
	const size_t req_off[] = {  0x5c,  0x0, 0x0 };
	struct sgl_element *iovs = req->iovs;
	uint32_t i;

	req->nseg = SPDK_COUNTOF(req_len);
	assert(SPDK_COUNTOF(req_len) == SPDK_COUNTOF(req_off));

	for (i = 0; i < req->nseg; i++) {
		iovs[i].base = spdk_zmalloc(req_off[i] + req_len[i], 0x4000, NULL, SPDK_ENV_LCORE_ID_ANY,
					    SPDK_MALLOC_DMA);
		iovs[i].offset = req_off[i];
		iovs[i].len = req_len[i];
	}
}

static void
build_io_request_11(struct io_request *req)
{
	/* This test case focuses on the last element not starting on a page boundary. */
	const size_t req_len[] = { 512, 512 };
	const size_t req_off[] = { 0xe00, 0x800 };
	struct sgl_element *iovs = req->iovs;
	uint32_t i;

	req->nseg = SPDK_COUNTOF(req_len);
	assert(SPDK_COUNTOF(req_len) == SPDK_COUNTOF(req_off));

	for (i = 0; i < req->nseg; i++) {
		iovs[i].base = spdk_zmalloc(req_off[i] + req_len[i], 0x4000, NULL, SPDK_ENV_LCORE_ID_ANY,
					    SPDK_MALLOC_DMA);
		iovs[i].offset = req_off[i];
		iovs[i].len = req_len[i];
	}
}

typedef void (*nvme_build_io_req_fn_t)(struct io_request *req);

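/*
 * Free every sge buffer in a request.  Subtracting misalign undoes the
 * deliberate offset applied in build_io_request_7 (it is zero elsewhere).
 */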
static void
free_req(struct io_request *req)
{
	uint32_t i;

	if (req == NULL) {
		return;
	}

	for (i = 0; i < req->nseg; i++) {
		spdk_free((uint8_t *)req->iovs[i].base - req->misalign);
	}

	spdk_free(req);
}

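/*
 * Core test: build an SGL with the given constructor, fill it with
 * DATA_PATTERN, write it to the namespace, read it back through the same
 * SGL, and verify every byte.  Returns 0 on success or skip, -1 on failure.
 */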
static int
writev_readv_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn, const char *test_name)
{
	int rc = 0;
	uint32_t len, lba_count;
	uint32_t i, j, nseg, remainder;
	char *buf;

	struct io_request *req;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_qpair *qpair;
	const struct spdk_nvme_ns_data *nsdata;

	ns = spdk_nvme_ctrlr_get_ns(dev->ctrlr, 1);
	if (!ns) {
		fprintf(stderr, "Null namespace\n");
		return 0;
	}
	nsdata = spdk_nvme_ns_get_data(ns);
	if (!nsdata || !spdk_nvme_ns_get_sector_size(ns)) {
		fprintf(stderr, "Empty nsdata or zero sector size\n");
		return 0;
	}

	/* Skip namespaces formatted with end-to-end data protection. */
	if (spdk_nvme_ns_get_flags(ns) & SPDK_NVME_NS_DPS_PI_SUPPORTED) {
		return 0;
	}

	req = spdk_zmalloc(sizeof(*req), 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (!req) {
		fprintf(stderr, "Allocate request failed\n");
		return 0;
	}

	/* Build the SGL for this test case. */
	build_io_fn(req);

	len = 0;
	for (i = 0; i < req->nseg; i++) {
		struct sgl_element *sge = &req->iovs[i];

		len += sge->len;
	}

	lba_count = len / spdk_nvme_ns_get_sector_size(ns);
	remainder = len % spdk_nvme_ns_get_sector_size(ns);
	if (!lba_count || remainder || (BASE_LBA_START + lba_count > nsdata->nsze)) {
		fprintf(stderr, "%s: %s Invalid IO length parameter\n", dev->name, test_name);
		free_req(req);
		return 0;
	}

	qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, NULL, 0);
	if (!qpair) {
		free_req(req);
		return -1;
	}

	nseg = req->nseg;
	/* Fill every segment with the data pattern before writing. */
	for (i = 0; i < nseg; i++) {
		memset((uint8_t *)req->iovs[i].base + req->iovs[i].offset, DATA_PATTERN, req->iovs[i].len);
	}

	rc = spdk_nvme_ns_cmd_writev(ns, qpair, BASE_LBA_START, lba_count,
				     io_complete, req, 0,
				     nvme_request_reset_sgl,
				     nvme_request_next_sge);

	if (rc != 0) {
		fprintf(stderr, "%s: %s writev failed\n", dev->name, test_name);
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		free_req(req);
		return -1;
	}

	io_complete_flag = 0;

	while (!io_complete_flag) {
		spdk_nvme_qpair_process_completions(qpair, 1);
	}

	if (io_complete_flag != 1) {
		fprintf(stderr, "%s: %s writev failed\n", dev->name, test_name);
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		free_req(req);
		return -1;
	}

	/* Reset the completion flag and clear the buffers before reading back. */
	io_complete_flag = 0;

	for (i = 0; i < nseg; i++) {
		memset((uint8_t *)req->iovs[i].base + req->iovs[i].offset, 0, req->iovs[i].len);
	}

	rc = spdk_nvme_ns_cmd_readv(ns, qpair, BASE_LBA_START, lba_count,
				    io_complete, req, 0,
				    nvme_request_reset_sgl,
				    nvme_request_next_sge);

	if (rc != 0) {
		fprintf(stderr, "%s: %s readv failed\n", dev->name, test_name);
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		free_req(req);
		return -1;
	}

	while (!io_complete_flag) {
		spdk_nvme_qpair_process_completions(qpair, 1);
	}

	if (io_complete_flag != 1) {
		fprintf(stderr, "%s: %s readv failed\n", dev->name, test_name);
		spdk_nvme_ctrlr_free_io_qpair(qpair);
		free_req(req);
		return -1;
	}

	/* Verify that every byte read back matches the pattern written. */
	for (i = 0; i < nseg; i++) {
		buf = (char *)req->iovs[i].base + req->iovs[i].offset;
		for (j = 0; j < req->iovs[i].len; j++) {
			if (buf[j] != DATA_PATTERN) {
				fprintf(stderr, "%s: %s write/read succeeded, but data compare failed\n", dev->name, test_name);
				spdk_nvme_ctrlr_free_io_qpair(qpair);
				free_req(req);
				return -1;
			}
		}
	}

	fprintf(stdout, "%s: %s test passed\n", dev->name, test_name);
	spdk_nvme_ctrlr_free_io_qpair(qpair);
	free_req(req);
	return rc;
}

static bool
probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	 struct spdk_nvme_ctrlr_opts *opts)
{
	printf("Attaching to %s\n", trid->traddr);

	return true;
}

static void
attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	  struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
{
	struct dev *dev;

	/* add to dev list, guarding against overflow of the fixed-size array */
	if (num_devs >= MAX_DEVS) {
		fprintf(stderr, "Too many controllers; skipping %s\n", trid->traddr);
		return;
	}

	dev = &devs[num_devs++];

	dev->ctrlr = ctrlr;

	snprintf(dev->name, sizeof(dev->name), "%s",
		 trid->traddr);

	printf("Attached to %s\n", dev->name);
}

int
main(int argc, char **argv)
{
	struct dev		*iter;
	int			rc;
	struct spdk_env_opts	opts;
	struct spdk_nvme_detach_ctx *detach_ctx = NULL;

	opts.opts_size = sizeof(opts);
	spdk_env_opts_init(&opts);
	opts.name = "nvme_sgl";
	opts.core_mask = "0x1";
	opts.shm_id = 0;
	if (spdk_env_init(&opts) < 0) {
		fprintf(stderr, "Unable to initialize SPDK env\n");
		return 1;
	}

	printf("NVMe Readv/Writev Request test\n");

	if (spdk_nvme_probe(NULL, NULL, probe_cb, attach_cb, NULL) != 0) {
		fprintf(stderr, "spdk_nvme_probe() failed\n");
		exit(1);
	}

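	/* Run every SGL construction pattern against each attached controller. */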
	rc = 0;
	foreach_dev(iter) {
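/* TEST(x) passes the constructor and its stringified name (#x) as the test label. */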
#define TEST(x) writev_readv_tests(iter, x, #x)
		if (TEST(build_io_request_0)
		    || TEST(build_io_request_1)
		    || TEST(build_io_request_2)
		    || TEST(build_io_request_3)
		    || TEST(build_io_request_4)
		    || TEST(build_io_request_5)
		    || TEST(build_io_request_6)
		    || TEST(build_io_request_7)
		    || TEST(build_io_request_8)
		    || TEST(build_io_request_9)
		    || TEST(build_io_request_10)
		    || TEST(build_io_request_11)) {
#undef TEST
			rc = 1;
			printf("%s: failed sgl tests\n", iter->name);
		}
	}

	printf("Cleaning up...\n");

	foreach_dev(iter) {
		spdk_nvme_detach_async(iter->ctrlr, &detach_ctx);
	}

	if (detach_ctx) {
		spdk_nvme_detach_poll(detach_ctx);
	}

	return rc;
}