xref: /spdk/test/nvme/e2edp/nvme_dp.c (revision 2172c432cfdaecc5a279d64e37c6b51e794683c1)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * NVMe end-to-end data protection test
36  */
37 
38 #include "spdk/stdinc.h"
39 
40 #include "spdk/nvme.h"
41 #include "spdk/env.h"
42 #include "spdk/crc16.h"
43 #include "spdk/endian.h"
44 #include "spdk/memory.h"
45 
/* Maximum number of controllers this test will track. */
#define MAX_DEVS 64

/* Byte pattern written out and verified after read-back. */
#define DATA_PATTERN 0x5A

/* One attached NVMe controller plus its transport address (display name). */
struct dev {
	struct spdk_nvme_ctrlr			*ctrlr;
	char					name[SPDK_NVMF_TRADDR_MAX_LEN + 1];
};

static struct dev devs[MAX_DEVS];
static int num_devs = 0;

/* Iterate over every controller attached so far. */
#define foreach_dev(iter) \
	for (iter = devs; iter - devs < num_devs; iter++)
60 
/* 0 = I/O pending, 1 = completed OK, 2 = completed with error (see io_complete()). */
static int io_complete_flag = 0;

/* Parameters describing a single write/read round-trip built by a test case. */
struct io_request {
	void *contig;		/* data buffer (interleaved data+MD when use_extended_lba) */
	void *metadata;		/* separate metadata buffer; NULL for extended LBA cases */
	bool use_extended_lba;	/* metadata is interleaved with data inside contig */
	bool use_sgl;		/* submit through the SGL (writev/readv) path */
	uint32_t sgl_offset;	/* cursor maintained by the SGL callbacks */
	uint32_t buf_size;	/* total size of contig; consumed by nvme_req_next_sge() */
	uint64_t lba;		/* starting LBA of the I/O */
	uint32_t lba_count;	/* number of logical blocks to transfer */
	uint16_t apptag_mask;	/* application tag mask for PRCHK_APPTAG */
	uint16_t apptag;	/* expected application tag value */
};
75 
76 static void
77 io_complete(void *ctx, const struct spdk_nvme_cpl *cpl)
78 {
79 	if (spdk_nvme_cpl_is_error(cpl)) {
80 		io_complete_flag = 2;
81 	} else {
82 		io_complete_flag = 1;
83 	}
84 }
85 
86 static void
87 ns_data_buffer_reset(struct spdk_nvme_ns *ns, struct io_request *req, uint8_t data_pattern)
88 {
89 	uint32_t md_size, sector_size;
90 	uint32_t i, offset = 0;
91 	uint8_t *buf;
92 
93 	sector_size = spdk_nvme_ns_get_sector_size(ns);
94 	md_size = spdk_nvme_ns_get_md_size(ns);
95 
96 	for (i = 0; i < req->lba_count; i++) {
97 		if (req->use_extended_lba) {
98 			offset = (sector_size + md_size) * i;
99 		} else {
100 			offset = sector_size * i;
101 		}
102 
103 		buf = (uint8_t *)req->contig + offset;
104 		memset(buf, data_pattern, sector_size);
105 	}
106 }
107 
108 static void nvme_req_reset_sgl(void *cb_arg, uint32_t sgl_offset)
109 {
110 	struct io_request *req = (struct io_request *)cb_arg;
111 
112 	req->sgl_offset = sgl_offset;
113 	return;
114 }
115 
116 static int nvme_req_next_sge(void *cb_arg, void **address, uint32_t *length)
117 {
118 	struct io_request *req = (struct io_request *)cb_arg;
119 	void *payload;
120 
121 	payload = req->contig + req->sgl_offset;
122 	*address = payload;
123 
124 	*length = req->buf_size - req->sgl_offset;
125 
126 	return 0;
127 }
128 
/* CRC-16 Guard checked for extended lba format */
static uint32_t dp_guard_check_extended_lba_test(struct spdk_nvme_ns *ns, struct io_request *req,
		uint32_t *io_flags)
{
	struct spdk_nvme_protection_info *pi;
	uint32_t md_size, sector_size;

	req->lba_count = 2;

	/* extended LBA only for the test case */
	if (!(spdk_nvme_ns_supports_extended_lba(ns))) {
		return 0;
	}

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	/* Interleaved layout: each logical block occupies sector_size + md_size bytes. */
	req->contig = spdk_zmalloc((sector_size + md_size) * req->lba_count, 0x1000, NULL,
				   SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	assert(req->contig);

	req->lba = 0;
	req->use_extended_lba = true;
	req->use_sgl = true;
	req->buf_size = (sector_size + md_size) * req->lba_count;
	req->metadata = NULL;
	ns_data_buffer_reset(ns, req, DATA_PATTERN);
	/* The 8-byte PI field sits at the very end of each block's metadata.
	 * Pre-compute the T10-DIF CRC of the first block's data so the
	 * controller's guard check passes on write.
	 * (Assumes md_size >= 8 — TODO confirm for the formats this runs on.)
	 */
	pi = (struct spdk_nvme_protection_info *)(req->contig + sector_size + md_size - 8);
	/* big-endian for guard */
	to_be16(&pi->guard, spdk_crc16_t10dif(0, req->contig, sector_size));

	/* Same for the second block, whose data starts one full stride in. */
	pi = (struct spdk_nvme_protection_info *)(req->contig + (sector_size + md_size) * 2 - 8);
	to_be16(&pi->guard, spdk_crc16_t10dif(0, req->contig + sector_size + md_size, sector_size));

	*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_GUARD;

	return req->lba_count;
}
166 
/*
 * No protection information with PRACT setting to 1,
 *  both extended LBA format and separate metadata can
 *  run the test case.
 */
static uint32_t dp_with_pract_test(struct spdk_nvme_ns *ns, struct io_request *req,
				   uint32_t *io_flags)
{
	uint32_t md_size, sector_size, data_len;

	req->lba_count = 8;

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	if (md_size == 8) {
		/* No additional metadata buffer provided */
		data_len = sector_size * req->lba_count;
	} else {
		data_len = (sector_size + md_size) * req->lba_count;
	}
	req->contig = spdk_zmalloc(data_len, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
				   SPDK_MALLOC_DMA);
	assert(req->contig);

	/* With PRACT=1 the controller inserts/strips the PI, so the host never
	 * fills this buffer in.  NOTE(review): it is allocated even when
	 * md_size == 8 (no separate metadata needed) — confirm intent.
	 */
	req->metadata = spdk_zmalloc(md_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
				     SPDK_MALLOC_DMA);
	assert(req->metadata);

	/* Type 3 defines no reference tag check, so only the guard is checked;
	 * Types 1 and 2 check both guard and reference tag.
	 */
	switch (spdk_nvme_ns_get_pi_type(ns)) {
	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3:
		*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_GUARD | SPDK_NVME_IO_FLAGS_PRACT;
		break;
	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE1:
	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE2:
		*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_GUARD | SPDK_NVME_IO_FLAGS_PRCHK_REFTAG |
			    SPDK_NVME_IO_FLAGS_PRACT;
		break;
	default:
		*io_flags = 0;
		break;
	}

	req->lba = 0;
	req->use_extended_lba = false;

	return req->lba_count;
}
214 
215 /* Block Reference Tag checked for TYPE1 and TYPE2 with PRACT setting to 0 */
216 static uint32_t dp_without_pract_extended_lba_test(struct spdk_nvme_ns *ns, struct io_request *req,
217 		uint32_t *io_flags)
218 {
219 	struct spdk_nvme_protection_info *pi;
220 	uint32_t md_size, sector_size;
221 
222 	req->lba_count = 2;
223 
224 	switch (spdk_nvme_ns_get_pi_type(ns)) {
225 	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3:
226 		return 0;
227 	default:
228 		break;
229 	}
230 
231 	/* extended LBA only for the test case */
232 	if (!(spdk_nvme_ns_supports_extended_lba(ns))) {
233 		return 0;
234 	}
235 
236 	sector_size = spdk_nvme_ns_get_sector_size(ns);
237 	md_size = spdk_nvme_ns_get_md_size(ns);
238 	req->contig = spdk_zmalloc((sector_size + md_size) * req->lba_count, 0x1000, NULL,
239 				   SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
240 	assert(req->contig);
241 
242 	req->lba = 0;
243 	req->use_extended_lba = true;
244 	req->metadata = NULL;
245 	pi = (struct spdk_nvme_protection_info *)(req->contig + sector_size + md_size - 8);
246 	/* big-endian for reference tag */
247 	to_be32(&pi->ref_tag, (uint32_t)req->lba);
248 
249 	pi = (struct spdk_nvme_protection_info *)(req->contig + (sector_size + md_size) * 2 - 8);
250 	/* is incremented for each subsequent logical block */
251 	to_be32(&pi->ref_tag, (uint32_t)(req->lba + 1));
252 
253 	*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;
254 
255 	return req->lba_count;
256 }
257 
258 /* LBA + Metadata without data protection bits setting */
259 static uint32_t dp_without_flags_extended_lba_test(struct spdk_nvme_ns *ns, struct io_request *req,
260 		uint32_t *io_flags)
261 {
262 	uint32_t md_size, sector_size;
263 
264 	req->lba_count = 16;
265 
266 	/* extended LBA only for the test case */
267 	if (!(spdk_nvme_ns_supports_extended_lba(ns))) {
268 		return 0;
269 	}
270 
271 	sector_size = spdk_nvme_ns_get_sector_size(ns);
272 	md_size = spdk_nvme_ns_get_md_size(ns);
273 	req->contig = spdk_zmalloc((sector_size + md_size) * req->lba_count, 0x1000, NULL,
274 				   SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
275 	assert(req->contig);
276 
277 	req->lba = 0;
278 	req->use_extended_lba = true;
279 	req->metadata = NULL;
280 	*io_flags = 0;
281 
282 	return req->lba_count;
283 }
284 
/* Block Reference Tag checked for TYPE1 and TYPE2 with PRACT setting to 0 */
static uint32_t dp_without_pract_separate_meta_test(struct spdk_nvme_ns *ns, struct io_request *req,
		uint32_t *io_flags)
{
	struct spdk_nvme_protection_info *pi;
	uint32_t md_size, sector_size;

	req->lba_count = 2;

	/* Type 3 does not define a reference tag check; skip the test there. */
	switch (spdk_nvme_ns_get_pi_type(ns)) {
	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3:
		return 0;
	default:
		break;
	}

	/* separate metadata payload for the test case */
	if (spdk_nvme_ns_supports_extended_lba(ns)) {
		return 0;
	}

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	/* Data and metadata travel in two distinct DMA-able buffers. */
	req->contig = spdk_zmalloc(sector_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
				   SPDK_MALLOC_DMA);
	assert(req->contig);

	req->metadata = spdk_zmalloc(md_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
				     SPDK_MALLOC_DMA);
	assert(req->metadata);

	req->lba = 0;
	req->use_extended_lba = false;

	/* last 8 bytes if the metadata size bigger than 8 */
	pi = (struct spdk_nvme_protection_info *)(req->metadata + md_size - 8);
	/* big-endian for reference tag */
	to_be32(&pi->ref_tag, (uint32_t)req->lba);

	pi = (struct spdk_nvme_protection_info *)(req->metadata + md_size * 2 - 8);
	/* is incremented for each subsequent logical block */
	to_be32(&pi->ref_tag, (uint32_t)(req->lba + 1));

	*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;

	return req->lba_count;
}
332 
/* Application Tag checked with PRACT setting to 0 */
static uint32_t dp_without_pract_separate_meta_apptag_test(struct spdk_nvme_ns *ns,
		struct io_request *req,
		uint32_t *io_flags)
{
	struct spdk_nvme_protection_info *pi;
	uint32_t md_size, sector_size;

	req->lba_count = 1;

	/* separate metadata payload for the test case */
	if (spdk_nvme_ns_supports_extended_lba(ns)) {
		return 0;
	}

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	req->contig = spdk_zmalloc(sector_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
				   SPDK_MALLOC_DMA);
	assert(req->contig);

	req->metadata = spdk_zmalloc(md_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
				     SPDK_MALLOC_DMA);
	assert(req->metadata);

	req->lba = 0;
	req->use_extended_lba = false;
	/* Check all 16 application-tag bits; the expected tag is lba_count (1). */
	req->apptag_mask = 0xFFFF;
	req->apptag = req->lba_count;

	/* last 8 bytes if the metadata size bigger than 8 */
	pi = (struct spdk_nvme_protection_info *)(req->metadata + md_size - 8);
	/* Store the tag big-endian so the controller's app-tag check passes. */
	to_be16(&pi->app_tag, req->lba_count);

	*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_APPTAG;

	return req->lba_count;
}
371 
372 /*
373  * LBA + Metadata without data protection bits setting,
374  *  separate metadata payload for the test case.
375  */
376 static uint32_t dp_without_flags_separate_meta_test(struct spdk_nvme_ns *ns, struct io_request *req,
377 		uint32_t *io_flags)
378 {
379 	uint32_t md_size, sector_size;
380 
381 	req->lba_count = 16;
382 
383 	/* separate metadata payload for the test case */
384 	if (spdk_nvme_ns_supports_extended_lba(ns)) {
385 		return 0;
386 	}
387 
388 	sector_size = spdk_nvme_ns_get_sector_size(ns);
389 	md_size = spdk_nvme_ns_get_md_size(ns);
390 	req->contig = spdk_zmalloc(sector_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
391 				   SPDK_MALLOC_DMA);
392 	assert(req->contig);
393 
394 	req->metadata = spdk_zmalloc(md_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
395 				     SPDK_MALLOC_DMA);
396 	assert(req->metadata);
397 
398 	req->lba = 0;
399 	req->use_extended_lba = false;
400 	*io_flags = 0;
401 
402 	return req->lba_count;
403 }
404 
/* Test-case builder: fills *req and writes the PRINFO flags through
 * *io_flags; returns the number of LBAs to transfer, or 0 when the
 * namespace format cannot run the case.  (The final parameter was
 * previously misnamed "lba_count"; every builder treats it as io_flags.)
 */
typedef uint32_t (*nvme_build_io_req_fn_t)(struct spdk_nvme_ns *ns, struct io_request *req,
		uint32_t *io_flags);
407 
408 static void
409 free_req(struct io_request *req)
410 {
411 	if (req == NULL) {
412 		return;
413 	}
414 
415 	if (req->contig) {
416 		spdk_free(req->contig);
417 	}
418 
419 	if (req->metadata) {
420 		spdk_free(req->metadata);
421 	}
422 
423 	spdk_free(req);
424 }
425 
426 static int
427 ns_data_buffer_compare(struct spdk_nvme_ns *ns, struct io_request *req, uint8_t data_pattern)
428 {
429 	uint32_t md_size, sector_size;
430 	uint32_t i, j, offset = 0;
431 	uint8_t *buf;
432 
433 	sector_size = spdk_nvme_ns_get_sector_size(ns);
434 	md_size = spdk_nvme_ns_get_md_size(ns);
435 
436 	for (i = 0; i < req->lba_count; i++) {
437 		if (req->use_extended_lba) {
438 			offset = (sector_size + md_size) * i;
439 		} else {
440 			offset = sector_size * i;
441 		}
442 
443 		buf = (uint8_t *)req->contig + offset;
444 		for (j = 0; j < sector_size; j++) {
445 			if (buf[j] != data_pattern) {
446 				return -1;
447 			}
448 		}
449 	}
450 
451 	return 0;
452 }
453 
454 static int
455 write_read_e2e_dp_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn, const char *test_name)
456 {
457 	int rc = 0;
458 	uint32_t lba_count;
459 	uint32_t io_flags = 0;
460 
461 	struct io_request *req;
462 	struct spdk_nvme_ns *ns;
463 	struct spdk_nvme_qpair *qpair;
464 	const struct spdk_nvme_ns_data *nsdata;
465 
466 	ns = spdk_nvme_ctrlr_get_ns(dev->ctrlr, 1);
467 	if (!ns) {
468 		printf("Null namespace\n");
469 		return 0;
470 	}
471 
472 	if (!(spdk_nvme_ns_get_flags(ns) & SPDK_NVME_NS_DPS_PI_SUPPORTED)) {
473 		return 0;
474 	}
475 
476 	nsdata = spdk_nvme_ns_get_data(ns);
477 	if (!nsdata || !spdk_nvme_ns_get_sector_size(ns)) {
478 		fprintf(stderr, "Empty nsdata or wrong sector size\n");
479 		return -EINVAL;
480 	}
481 
482 	req = spdk_zmalloc(sizeof(*req), 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
483 	assert(req);
484 
485 	/* IO parameters setting */
486 	lba_count = build_io_fn(ns, req, &io_flags);
487 	if (!lba_count) {
488 		printf("%s: %s bypass the test case\n", dev->name, test_name);
489 		free_req(req);
490 		return 0;
491 	}
492 
493 	qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, NULL, 0);
494 	if (!qpair) {
495 		free_req(req);
496 		return -1;
497 	}
498 
499 	ns_data_buffer_reset(ns, req, DATA_PATTERN);
500 	if (req->use_extended_lba && req->use_sgl) {
501 		rc = spdk_nvme_ns_cmd_writev(ns, qpair, req->lba, lba_count, io_complete, req, io_flags,
502 					     nvme_req_reset_sgl, nvme_req_next_sge);
503 	} else if (req->use_extended_lba) {
504 		rc = spdk_nvme_ns_cmd_write(ns, qpair, req->contig, req->lba, lba_count,
505 					    io_complete, req, io_flags);
506 	} else {
507 		rc = spdk_nvme_ns_cmd_write_with_md(ns, qpair, req->contig, req->metadata, req->lba, lba_count,
508 						    io_complete, req, io_flags, req->apptag_mask, req->apptag);
509 	}
510 
511 	if (rc != 0) {
512 		fprintf(stderr, "%s: %s write submit failed\n", dev->name, test_name);
513 		spdk_nvme_ctrlr_free_io_qpair(qpair);
514 		free_req(req);
515 		return -1;
516 	}
517 
518 	io_complete_flag = 0;
519 
520 	while (!io_complete_flag) {
521 		spdk_nvme_qpair_process_completions(qpair, 1);
522 	}
523 
524 	if (io_complete_flag != 1) {
525 		fprintf(stderr, "%s: %s write exec failed\n", dev->name, test_name);
526 		spdk_nvme_ctrlr_free_io_qpair(qpair);
527 		free_req(req);
528 		return -1;
529 	}
530 
531 	/* reset completion flag */
532 	io_complete_flag = 0;
533 
534 	ns_data_buffer_reset(ns, req, 0);
535 	if (req->use_extended_lba && req->use_sgl) {
536 		rc = spdk_nvme_ns_cmd_readv(ns, qpair, req->lba, lba_count, io_complete, req, io_flags,
537 					    nvme_req_reset_sgl, nvme_req_next_sge);
538 
539 	} else if (req->use_extended_lba) {
540 		rc = spdk_nvme_ns_cmd_read(ns, qpair, req->contig, req->lba, lba_count,
541 					   io_complete, req, io_flags);
542 	} else {
543 		rc = spdk_nvme_ns_cmd_read_with_md(ns, qpair, req->contig, req->metadata, req->lba, lba_count,
544 						   io_complete, req, io_flags, req->apptag_mask, req->apptag);
545 	}
546 
547 	if (rc != 0) {
548 		fprintf(stderr, "%s: %s read failed\n", dev->name, test_name);
549 		spdk_nvme_ctrlr_free_io_qpair(qpair);
550 		free_req(req);
551 		return -1;
552 	}
553 
554 	while (!io_complete_flag) {
555 		spdk_nvme_qpair_process_completions(qpair, 1);
556 	}
557 
558 	if (io_complete_flag != 1) {
559 		fprintf(stderr, "%s: %s read failed\n", dev->name, test_name);
560 		spdk_nvme_ctrlr_free_io_qpair(qpair);
561 		free_req(req);
562 		return -1;
563 	}
564 
565 	rc = ns_data_buffer_compare(ns, req, DATA_PATTERN);
566 	if (rc < 0) {
567 		fprintf(stderr, "%s: %s write/read success, but memcmp Failed\n", dev->name, test_name);
568 		spdk_nvme_ctrlr_free_io_qpair(qpair);
569 		free_req(req);
570 		return -1;
571 	}
572 
573 	printf("%s: %s test passed\n", dev->name, test_name);
574 	spdk_nvme_ctrlr_free_io_qpair(qpair);
575 	free_req(req);
576 	return 0;
577 }
578 
/* Probe callback: accept (attach to) every controller found during enumeration. */
static bool
probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	 struct spdk_nvme_ctrlr_opts *opts)
{
	printf("Attaching to %s\n", trid->traddr);

	return true;
}
587 
588 static void
589 attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
590 	  struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
591 {
592 	struct dev *dev;
593 
594 	/* add to dev list */
595 	dev = &devs[num_devs++];
596 
597 	dev->ctrlr = ctrlr;
598 
599 	snprintf(dev->name, sizeof(dev->name), "%s",
600 		 trid->traddr);
601 
602 	printf("Attached to %s\n", dev->name);
603 }
604 
int main(int argc, char **argv)
{
	struct dev		*iter;
	int			rc;
	struct spdk_env_opts	opts;

	/* Single-core SPDK environment is enough for this serialized test. */
	spdk_env_opts_init(&opts);
	opts.name = "nvme_dp";
	opts.core_mask = "0x1";
	opts.shm_id = 0;
	if (spdk_env_init(&opts) < 0) {
		fprintf(stderr, "Unable to initialize SPDK env\n");
		return 1;
	}

	printf("NVMe Write/Read with End-to-End data protection test\n");

	/* Enumerate and attach controllers; attach_cb() fills devs[]. */
	if (spdk_nvme_probe(NULL, NULL, probe_cb, attach_cb, NULL) != 0) {
		fprintf(stderr, "nvme_probe() failed\n");
		exit(1);
	}

	rc = 0;
	foreach_dev(iter) {
		/* Run every DP test case; short-circuit || stops at the first
		 * failure for this device, but remaining devices still run.
		 */
#define TEST(x) write_read_e2e_dp_tests(iter, x, #x)
		if (TEST(dp_with_pract_test)
		    || TEST(dp_guard_check_extended_lba_test)
		    || TEST(dp_without_pract_extended_lba_test)
		    || TEST(dp_without_flags_extended_lba_test)
		    || TEST(dp_without_pract_separate_meta_test)
		    || TEST(dp_without_pract_separate_meta_apptag_test)
		    || TEST(dp_without_flags_separate_meta_test)) {
#undef TEST
			rc = 1;
			printf("%s: failed End-to-End data protection tests\n", iter->name);
		}
	}

	printf("Cleaning up...\n");

	foreach_dev(iter) {
		spdk_nvme_detach(iter->ctrlr);
	}

	return rc;
}
651