xref: /spdk/test/nvme/e2edp/nvme_dp.c (revision 57fd99b91e71a4baa5543e19ff83958dc99d4dac)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2016 Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 /*
7  * NVMe end-to-end data protection test
8  */
9 
10 #include "spdk/stdinc.h"
11 
12 #include "spdk/nvme.h"
13 #include "spdk/env.h"
14 #include "spdk/crc16.h"
15 #include "spdk/endian.h"
16 #include "spdk/memory.h"
17 
18 #define MAX_DEVS 64
19 
20 #define DATA_PATTERN 0x5A
21 
/* Bookkeeping for one attached NVMe controller. */
struct dev {
	struct spdk_nvme_ctrlr			*ctrlr;
	char					name[SPDK_NVMF_TRADDR_MAX_LEN + 1];	/* transport address, used in log output */
};

static struct dev devs[MAX_DEVS];
static int num_devs = 0;
static struct spdk_nvme_transport_id g_trid = {};

/* Iterate over the num_devs controllers registered in devs[]. */
#define foreach_dev(iter) \
	for (iter = devs; iter - devs < num_devs; iter++)

/* I/O completion state: 0 = in flight, 1 = success, 2 = error (set by io_complete()) */
static int io_complete_flag = 0;
35 
/* One write/read round-trip request plus the parameters needed to verify it. */
struct io_request {
	void *contig;		/* data buffer; holds interleaved metadata for extended-LBA cases */
	void *metadata;		/* separate metadata buffer; NULL for extended-LBA cases */
	bool use_extended_lba;	/* metadata is interleaved after each sector in contig */
	bool use_sgl;		/* submit via writev/readv using the SGL callbacks */
	uint32_t sgl_offset;	/* current offset, set by nvme_req_reset_sgl() */
	uint32_t buf_size;	/* total size of contig, consumed by nvme_req_next_sge() */
	uint64_t lba;		/* starting logical block address */
	uint32_t lba_count;	/* number of logical blocks to transfer */
	uint16_t apptag_mask;	/* application tag mask for PRCHK_APPTAG cases */
	uint16_t apptag;	/* expected application tag value */
};
48 
49 static void
50 io_complete(void *ctx, const struct spdk_nvme_cpl *cpl)
51 {
52 	if (spdk_nvme_cpl_is_error(cpl)) {
53 		io_complete_flag = 2;
54 	} else {
55 		io_complete_flag = 1;
56 	}
57 }
58 
59 static void
60 ns_data_buffer_reset(struct spdk_nvme_ns *ns, struct io_request *req, uint8_t data_pattern)
61 {
62 	uint32_t md_size, sector_size;
63 	uint32_t i, offset = 0;
64 	uint8_t *buf;
65 
66 	sector_size = spdk_nvme_ns_get_sector_size(ns);
67 	md_size = spdk_nvme_ns_get_md_size(ns);
68 
69 	for (i = 0; i < req->lba_count; i++) {
70 		if (req->use_extended_lba) {
71 			offset = (sector_size + md_size) * i;
72 		} else {
73 			offset = sector_size * i;
74 		}
75 
76 		buf = (uint8_t *)req->contig + offset;
77 		memset(buf, data_pattern, sector_size);
78 	}
79 }
80 
81 static void
82 nvme_req_reset_sgl(void *cb_arg, uint32_t sgl_offset)
83 {
84 	struct io_request *req = (struct io_request *)cb_arg;
85 
86 	req->sgl_offset = sgl_offset;
87 	return;
88 }
89 
90 static int
91 nvme_req_next_sge(void *cb_arg, void **address, uint32_t *length)
92 {
93 	struct io_request *req = (struct io_request *)cb_arg;
94 	void *payload;
95 
96 	payload = req->contig + req->sgl_offset;
97 	*address = payload;
98 
99 	*length = req->buf_size - req->sgl_offset;
100 
101 	return 0;
102 }
103 
/* CRC-16 Guard checked for extended lba format.
 * Builds a 2-LBA extended-LBA request (submitted via SGL) with a valid
 * T10-DIF guard in each block's protection information field, and asks the
 * controller to verify the guard on the way through.
 * Returns the LBA count to transfer, or 0 if the case does not apply. */
static uint32_t
dp_guard_check_extended_lba_test(struct spdk_nvme_ns *ns, struct io_request *req,
				 uint32_t *io_flags)
{
	struct spdk_nvme_protection_info *pi;
	uint32_t md_size, sector_size, chksum_size;

	req->lba_count = 2;

	/* extended LBA only for the test case */
	if (!(spdk_nvme_ns_supports_extended_lba(ns))) {
		return 0;
	}

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	/* bytes covered by the guard: everything up to the trailing 8-byte PI */
	chksum_size = sector_size + md_size - 8;
	req->contig = spdk_zmalloc((sector_size + md_size) * req->lba_count, 0x1000, NULL,
				   SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	assert(req->contig);

	req->lba = 0;
	req->use_extended_lba = true;
	req->use_sgl = true;
	req->buf_size = (sector_size + md_size) * req->lba_count;
	req->metadata = NULL;
	ns_data_buffer_reset(ns, req, DATA_PATTERN);
	/* PI of the first extended LBA occupies its last 8 bytes */
	pi = (struct spdk_nvme_protection_info *)(req->contig + chksum_size);
	/* big-endian for guard */
	to_be16(&pi->guard, spdk_crc16_t10dif(0, req->contig, chksum_size));

	/* PI of the second extended LBA; its guard covers that block's own bytes */
	pi = (struct spdk_nvme_protection_info *)(req->contig + (sector_size + md_size) * 2 - 8);
	to_be16(&pi->guard, spdk_crc16_t10dif(0, req->contig + sector_size + md_size, chksum_size));

	*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_GUARD;

	return req->lba_count;
}
143 
144 /*
145  * No protection information with PRACT setting to 1,
146  *  both extended LBA format and separate metadata can
147  *  run the test case.
148  */
149 static uint32_t
150 dp_with_pract_test(struct spdk_nvme_ns *ns, struct io_request *req,
151 		   uint32_t *io_flags)
152 {
153 	uint32_t md_size, sector_size, data_len;
154 
155 	req->lba_count = 8;
156 	req->use_extended_lba = spdk_nvme_ns_supports_extended_lba(ns) ? true : false;
157 
158 	sector_size = spdk_nvme_ns_get_sector_size(ns);
159 	md_size = spdk_nvme_ns_get_md_size(ns);
160 	if (md_size == 8) {
161 		/* No additional metadata buffer provided */
162 		data_len = sector_size * req->lba_count;
163 		req->use_extended_lba = false;
164 	} else {
165 		data_len = (sector_size + md_size) * req->lba_count;
166 	}
167 	req->contig = spdk_zmalloc(data_len, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
168 				   SPDK_MALLOC_DMA);
169 	assert(req->contig);
170 
171 	req->metadata = spdk_zmalloc(md_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
172 				     SPDK_MALLOC_DMA);
173 	assert(req->metadata);
174 
175 	switch (spdk_nvme_ns_get_pi_type(ns)) {
176 	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3:
177 		*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_GUARD | SPDK_NVME_IO_FLAGS_PRACT;
178 		break;
179 	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE1:
180 	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE2:
181 		*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_GUARD | SPDK_NVME_IO_FLAGS_PRCHK_REFTAG |
182 			    SPDK_NVME_IO_FLAGS_PRACT;
183 		break;
184 	default:
185 		*io_flags = 0;
186 		break;
187 	}
188 
189 	req->lba = 0;
190 
191 	return req->lba_count;
192 }
193 
/* Block Reference Tag checked for TYPE1 and TYPE2 with PRACT setting to 0.
 * Builds a 2-LBA extended-LBA request whose PI fields carry the expected
 * reference tags (starting LBA, then LBA+1) and asks the controller to
 * verify them.  Returns the LBA count, or 0 if the case does not apply. */
static uint32_t
dp_without_pract_extended_lba_test(struct spdk_nvme_ns *ns, struct io_request *req,
				   uint32_t *io_flags)
{
	struct spdk_nvme_protection_info *pi;
	uint32_t md_size, sector_size;

	req->lba_count = 2;

	/* TYPE3 defines no reference tag check, so skip it */
	switch (spdk_nvme_ns_get_pi_type(ns)) {
	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3:
		return 0;
	default:
		break;
	}

	/* extended LBA only for the test case */
	if (!(spdk_nvme_ns_supports_extended_lba(ns))) {
		return 0;
	}

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	req->contig = spdk_zmalloc((sector_size + md_size) * req->lba_count, 0x1000, NULL,
				   SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	assert(req->contig);

	req->lba = 0;
	req->use_extended_lba = true;
	req->metadata = NULL;
	/* PI occupies the last 8 bytes of each extended LBA */
	pi = (struct spdk_nvme_protection_info *)(req->contig + sector_size + md_size - 8);
	/* big-endian for reference tag */
	to_be32(&pi->ref_tag, (uint32_t)req->lba);

	pi = (struct spdk_nvme_protection_info *)(req->contig + (sector_size + md_size) * 2 - 8);
	/* is incremented for each subsequent logical block */
	to_be32(&pi->ref_tag, (uint32_t)(req->lba + 1));

	*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;

	return req->lba_count;
}
237 
238 /* LBA + Metadata without data protection bits setting */
239 static uint32_t
240 dp_without_flags_extended_lba_test(struct spdk_nvme_ns *ns, struct io_request *req,
241 				   uint32_t *io_flags)
242 {
243 	uint32_t md_size, sector_size;
244 
245 	req->lba_count = 16;
246 
247 	/* extended LBA only for the test case */
248 	if (!(spdk_nvme_ns_supports_extended_lba(ns))) {
249 		return 0;
250 	}
251 
252 	sector_size = spdk_nvme_ns_get_sector_size(ns);
253 	md_size = spdk_nvme_ns_get_md_size(ns);
254 	req->contig = spdk_zmalloc((sector_size + md_size) * req->lba_count, 0x1000, NULL,
255 				   SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
256 	assert(req->contig);
257 
258 	req->lba = 0;
259 	req->use_extended_lba = true;
260 	req->metadata = NULL;
261 	*io_flags = 0;
262 
263 	return req->lba_count;
264 }
265 
/* Block Reference Tag checked for TYPE1 and TYPE2 with PRACT setting to 0.
 * Separate-metadata variant: the reference tags are written into the last
 * 8 bytes of each block's slot in the metadata buffer.
 * Returns the LBA count, or 0 if the case does not apply. */
static uint32_t
dp_without_pract_separate_meta_test(struct spdk_nvme_ns *ns, struct io_request *req,
				    uint32_t *io_flags)
{
	struct spdk_nvme_protection_info *pi;
	uint32_t md_size, sector_size;

	req->lba_count = 2;

	/* TYPE3 defines no reference tag check, so skip it */
	switch (spdk_nvme_ns_get_pi_type(ns)) {
	case SPDK_NVME_FMT_NVM_PROTECTION_TYPE3:
		return 0;
	default:
		break;
	}

	/* separate metadata payload for the test case */
	if (spdk_nvme_ns_supports_extended_lba(ns)) {
		return 0;
	}

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	req->contig = spdk_zmalloc(sector_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
				   SPDK_MALLOC_DMA);
	assert(req->contig);

	req->metadata = spdk_zmalloc(md_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
				     SPDK_MALLOC_DMA);
	assert(req->metadata);

	req->lba = 0;
	req->use_extended_lba = false;

	/* last 8 bytes if the metadata size bigger than 8 */
	pi = (struct spdk_nvme_protection_info *)(req->metadata + md_size - 8);
	/* big-endian for reference tag */
	to_be32(&pi->ref_tag, (uint32_t)req->lba);

	pi = (struct spdk_nvme_protection_info *)(req->metadata + md_size * 2 - 8);
	/* is incremented for each subsequent logical block */
	to_be32(&pi->ref_tag, (uint32_t)(req->lba + 1));

	*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;

	return req->lba_count;
}
314 
/* Application Tag checked with PRACT setting to 0.
 * Single-LBA, separate-metadata request: writes the expected application
 * tag into the PI slot and asks the controller to verify it against
 * req->apptag/req->apptag_mask.  Returns the LBA count, or 0 to skip. */
static uint32_t
dp_without_pract_separate_meta_apptag_test(struct spdk_nvme_ns *ns,
		struct io_request *req,
		uint32_t *io_flags)
{
	struct spdk_nvme_protection_info *pi;
	uint32_t md_size, sector_size;

	req->lba_count = 1;

	/* separate metadata payload for the test case */
	if (spdk_nvme_ns_supports_extended_lba(ns)) {
		return 0;
	}

	sector_size = spdk_nvme_ns_get_sector_size(ns);
	md_size = spdk_nvme_ns_get_md_size(ns);
	req->contig = spdk_zmalloc(sector_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
				   SPDK_MALLOC_DMA);
	assert(req->contig);

	req->metadata = spdk_zmalloc(md_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
				     SPDK_MALLOC_DMA);
	assert(req->metadata);

	req->lba = 0;
	req->use_extended_lba = false;
	/* check the full 16-bit tag; expected value equals lba_count (1) */
	req->apptag_mask = 0xFFFF;
	req->apptag = req->lba_count;

	/* last 8 bytes if the metadata size bigger than 8 */
	pi = (struct spdk_nvme_protection_info *)(req->metadata + md_size - 8);
	to_be16(&pi->app_tag, req->lba_count);

	*io_flags = SPDK_NVME_IO_FLAGS_PRCHK_APPTAG;

	return req->lba_count;
}
354 
355 /*
356  * LBA + Metadata without data protection bits setting,
357  *  separate metadata payload for the test case.
358  */
359 static uint32_t
360 dp_without_flags_separate_meta_test(struct spdk_nvme_ns *ns, struct io_request *req,
361 				    uint32_t *io_flags)
362 {
363 	uint32_t md_size, sector_size;
364 
365 	req->lba_count = 16;
366 
367 	/* separate metadata payload for the test case */
368 	if (spdk_nvme_ns_supports_extended_lba(ns)) {
369 		return 0;
370 	}
371 
372 	sector_size = spdk_nvme_ns_get_sector_size(ns);
373 	md_size = spdk_nvme_ns_get_md_size(ns);
374 	req->contig = spdk_zmalloc(sector_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
375 				   SPDK_MALLOC_DMA);
376 	assert(req->contig);
377 
378 	req->metadata = spdk_zmalloc(md_size * req->lba_count, 0x1000, NULL, SPDK_ENV_LCORE_ID_ANY,
379 				     SPDK_MALLOC_DMA);
380 	assert(req->metadata);
381 
382 	req->lba = 0;
383 	req->use_extended_lba = false;
384 	*io_flags = 0;
385 
386 	return req->lba_count;
387 }
388 
/* Per-test-case request builder: fills in *req, sets *io_flags, and returns
 * the number of LBAs to transfer (0 means the case does not apply). */
typedef uint32_t (*nvme_build_io_req_fn_t)(struct spdk_nvme_ns *ns, struct io_request *req,
		uint32_t *io_flags);
391 
392 static void
393 free_req(struct io_request *req)
394 {
395 	if (req == NULL) {
396 		return;
397 	}
398 
399 	if (req->contig) {
400 		spdk_free(req->contig);
401 	}
402 
403 	if (req->metadata) {
404 		spdk_free(req->metadata);
405 	}
406 
407 	spdk_free(req);
408 }
409 
410 static int
411 ns_data_buffer_compare(struct spdk_nvme_ns *ns, struct io_request *req, uint8_t data_pattern)
412 {
413 	uint32_t md_size, sector_size;
414 	uint32_t i, j, offset = 0;
415 	uint8_t *buf;
416 
417 	sector_size = spdk_nvme_ns_get_sector_size(ns);
418 	md_size = spdk_nvme_ns_get_md_size(ns);
419 
420 	for (i = 0; i < req->lba_count; i++) {
421 		if (req->use_extended_lba) {
422 			offset = (sector_size + md_size) * i;
423 		} else {
424 			offset = sector_size * i;
425 		}
426 
427 		buf = (uint8_t *)req->contig + offset;
428 		for (j = 0; j < sector_size; j++) {
429 			if (buf[j] != data_pattern) {
430 				return -1;
431 			}
432 		}
433 	}
434 
435 	return 0;
436 }
437 
438 static int
439 write_read_e2e_dp_tests(struct dev *dev, nvme_build_io_req_fn_t build_io_fn, const char *test_name)
440 {
441 	int rc = 0;
442 	uint32_t lba_count;
443 	uint32_t io_flags = 0;
444 
445 	struct io_request *req;
446 	struct spdk_nvme_ns *ns;
447 	struct spdk_nvme_qpair *qpair;
448 	const struct spdk_nvme_ns_data *nsdata;
449 
450 	ns = spdk_nvme_ctrlr_get_ns(dev->ctrlr, 1);
451 	if (!ns) {
452 		printf("Null namespace\n");
453 		return 0;
454 	}
455 
456 	if (!(spdk_nvme_ns_get_flags(ns) & SPDK_NVME_NS_DPS_PI_SUPPORTED)) {
457 		return 0;
458 	}
459 
460 	nsdata = spdk_nvme_ns_get_data(ns);
461 	if (!nsdata || !spdk_nvme_ns_get_sector_size(ns)) {
462 		fprintf(stderr, "Empty nsdata or wrong sector size\n");
463 		return -EINVAL;
464 	}
465 
466 	req = spdk_zmalloc(sizeof(*req), 0, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
467 	assert(req);
468 
469 	/* IO parameters setting */
470 	lba_count = build_io_fn(ns, req, &io_flags);
471 	if (!lba_count) {
472 		printf("%s: %s bypass the test case\n", dev->name, test_name);
473 		free_req(req);
474 		return 0;
475 	}
476 
477 	qpair = spdk_nvme_ctrlr_alloc_io_qpair(dev->ctrlr, NULL, 0);
478 	if (!qpair) {
479 		free_req(req);
480 		return -1;
481 	}
482 
483 	ns_data_buffer_reset(ns, req, DATA_PATTERN);
484 	if (req->use_extended_lba && req->use_sgl) {
485 		rc = spdk_nvme_ns_cmd_writev(ns, qpair, req->lba, lba_count, io_complete, req, io_flags,
486 					     nvme_req_reset_sgl, nvme_req_next_sge);
487 	} else if (req->use_extended_lba) {
488 		rc = spdk_nvme_ns_cmd_write(ns, qpair, req->contig, req->lba, lba_count,
489 					    io_complete, req, io_flags);
490 	} else {
491 		rc = spdk_nvme_ns_cmd_write_with_md(ns, qpair, req->contig, req->metadata, req->lba, lba_count,
492 						    io_complete, req, io_flags, req->apptag_mask, req->apptag);
493 	}
494 
495 	if (rc != 0) {
496 		fprintf(stderr, "%s: %s write submit failed\n", dev->name, test_name);
497 		spdk_nvme_ctrlr_free_io_qpair(qpair);
498 		free_req(req);
499 		return -1;
500 	}
501 
502 	io_complete_flag = 0;
503 
504 	while (!io_complete_flag) {
505 		spdk_nvme_qpair_process_completions(qpair, 1);
506 	}
507 
508 	if (io_complete_flag != 1) {
509 		fprintf(stderr, "%s: %s write exec failed\n", dev->name, test_name);
510 		spdk_nvme_ctrlr_free_io_qpair(qpair);
511 		free_req(req);
512 		return -1;
513 	}
514 
515 	/* reset completion flag */
516 	io_complete_flag = 0;
517 
518 	ns_data_buffer_reset(ns, req, 0);
519 	if (req->use_extended_lba && req->use_sgl) {
520 		rc = spdk_nvme_ns_cmd_readv(ns, qpair, req->lba, lba_count, io_complete, req, io_flags,
521 					    nvme_req_reset_sgl, nvme_req_next_sge);
522 
523 	} else if (req->use_extended_lba) {
524 		rc = spdk_nvme_ns_cmd_read(ns, qpair, req->contig, req->lba, lba_count,
525 					   io_complete, req, io_flags);
526 	} else {
527 		rc = spdk_nvme_ns_cmd_read_with_md(ns, qpair, req->contig, req->metadata, req->lba, lba_count,
528 						   io_complete, req, io_flags, req->apptag_mask, req->apptag);
529 	}
530 
531 	if (rc != 0) {
532 		fprintf(stderr, "%s: %s read failed\n", dev->name, test_name);
533 		spdk_nvme_ctrlr_free_io_qpair(qpair);
534 		free_req(req);
535 		return -1;
536 	}
537 
538 	while (!io_complete_flag) {
539 		spdk_nvme_qpair_process_completions(qpair, 1);
540 	}
541 
542 	if (io_complete_flag != 1) {
543 		fprintf(stderr, "%s: %s read failed\n", dev->name, test_name);
544 		spdk_nvme_ctrlr_free_io_qpair(qpair);
545 		free_req(req);
546 		return -1;
547 	}
548 
549 	rc = ns_data_buffer_compare(ns, req, DATA_PATTERN);
550 	if (rc < 0) {
551 		fprintf(stderr, "%s: %s write/read success, but memcmp Failed\n", dev->name, test_name);
552 		spdk_nvme_ctrlr_free_io_qpair(qpair);
553 		free_req(req);
554 		return -1;
555 	}
556 
557 	printf("%s: %s test passed\n", dev->name, test_name);
558 	spdk_nvme_ctrlr_free_io_qpair(qpair);
559 	free_req(req);
560 	return 0;
561 }
562 
/* Probe callback: invoked for each discovered controller; returning true
 * requests attachment (attach_cb will then be called). */
static bool
probe_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	 struct spdk_nvme_ctrlr_opts *opts)
{
	printf("Attaching to %s\n", trid->traddr);

	return true;
}
571 
572 static void
573 add_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
574 {
575 	struct dev *dev;
576 
577 	/* add to dev list */
578 	dev = &devs[num_devs++];
579 
580 	dev->ctrlr = ctrlr;
581 
582 	snprintf(dev->name, sizeof(dev->name), "%s",
583 		 spdk_nvme_ctrlr_get_transport_id(ctrlr)->traddr);
584 
585 	printf("Attached to %s\n", dev->name);
586 }
587 
/* Attach callback: called after probe_cb approved the controller; hand the
 * new controller handle to the device list. */
static void
attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	  struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
{
	add_ctrlr(ctrlr);
}
594 
595 static int
596 parse_args(int argc, char **argv)
597 {
598 	int op;
599 
600 	spdk_nvme_trid_populate_transport(&g_trid, SPDK_NVME_TRANSPORT_PCIE);
601 	snprintf(g_trid.subnqn, sizeof(g_trid.subnqn), "%s", SPDK_NVMF_DISCOVERY_NQN);
602 
603 	while ((op = getopt(argc, argv, "r:")) != -1) {
604 		switch (op) {
605 		case 'r':
606 			if (spdk_nvme_transport_id_parse(&g_trid, optarg) != 0) {
607 				fprintf(stderr, "Error parsing transport address\n");
608 				return -EINVAL;
609 			}
610 			break;
611 		default:
612 			fprintf(stderr, "Usage: %s [-r trid]\n", argv[0]);
613 			return -EINVAL;
614 		}
615 	}
616 
617 	return 0;
618 }
619 
/*
 * Entry point: initialize the SPDK environment, attach controllers (either
 * the one named by -r, or everything probed on the default transport), run
 * all end-to-end data protection test cases on each, then detach.
 * Returns 0 if every applicable test passed, 1 otherwise.
 */
int
main(int argc, char **argv)
{
	struct dev		*iter;
	int			rc;
	struct spdk_env_opts	opts;
	struct spdk_nvme_detach_ctx *detach_ctx = NULL;
	struct spdk_nvme_ctrlr	*ctrlr;

	/* opts_size is set before spdk_env_opts_init() — presumably the SPDK
	 * ABI-versioning convention for the env opts struct; confirm against
	 * the SPDK env API docs for this revision. */
	opts.opts_size = sizeof(opts);
	spdk_env_opts_init(&opts);
	opts.name = "nvme_dp";
	opts.core_mask = "0x1";
	opts.shm_id = 0;
	if (spdk_env_init(&opts) < 0) {
		fprintf(stderr, "Unable to initialize SPDK env\n");
		return 1;
	}

	if (parse_args(argc, argv) != 0) {
		return 1;
	}

	printf("NVMe Write/Read with End-to-End data protection test\n");

	/* -r supplied a specific transport ID: connect to it directly;
	 * otherwise probe and attach every discoverable controller. */
	if (g_trid.traddr[0] != '\0') {
		ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
		if (ctrlr == NULL) {
			fprintf(stderr, "nvme_connect() failed\n");
			return 1;
		}

		add_ctrlr(ctrlr);
	} else if (spdk_nvme_probe(&g_trid, NULL, probe_cb, attach_cb, NULL) != 0) {
		fprintf(stderr, "nvme_probe() failed\n");
		exit(1);
	}

	if (num_devs == 0) {
		fprintf(stderr, "No valid NVMe controllers found\n");
		return 1;
	}

	/* run every test case on every attached controller; any failure
	 * marks the whole run as failed but the loop continues */
	rc = 0;
	foreach_dev(iter) {
#define TEST(x) write_read_e2e_dp_tests(iter, x, #x)
		if (TEST(dp_with_pract_test)
		    || TEST(dp_guard_check_extended_lba_test)
		    || TEST(dp_without_pract_extended_lba_test)
		    || TEST(dp_without_flags_extended_lba_test)
		    || TEST(dp_without_pract_separate_meta_test)
		    || TEST(dp_without_pract_separate_meta_apptag_test)
		    || TEST(dp_without_flags_separate_meta_test)) {
#undef TEST
			rc = 1;
			printf("%s: failed End-to-End data protection tests\n", iter->name);
		}
	}

	printf("Cleaning up...\n");

	/* kick off async detach for all controllers, then poll to completion */
	foreach_dev(iter) {
		spdk_nvme_detach_async(iter->ctrlr, &detach_ctx);
	}

	if (detach_ctx) {
		spdk_nvme_detach_poll(detach_ctx);
	}

	return rc;
}
691