xref: /spdk/test/nvme/compliance/nvme_compliance.c (revision b3bec07939ebe2ea2e0c43931705d32aa9e06719)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2021 Intel Corporation. All rights reserved.
3  */
4 
5 #include "spdk/stdinc.h"
6 #include "spdk_internal/cunit.h"
7 #include "spdk/log.h"
8 #include "spdk/util.h"
9 #include "spdk/nvme.h"
10 #include "spdk/string.h"
11 
12 static struct spdk_nvme_transport_id g_trid;
13 static const char *g_trid_str;
14 
/* Per-command completion tracker shared between a submission site and the
 * test_cb() completion callback.
 */
struct status {
	bool			done;	/* set by the completion callback */
	struct spdk_nvme_cpl	cpl;	/* completion entry copied by the callback */
};
19 
20 static inline uint64_t
21 nvme_vtophys(struct spdk_nvme_transport_id *trid, const void *buf, uint64_t *size)
22 {
23 	/* vfio-user address translation with IOVA=VA mode */
24 	if (trid->trtype != SPDK_NVME_TRANSPORT_VFIOUSER) {
25 		return spdk_vtophys(buf, size);
26 	} else {
27 		return (uint64_t)(uintptr_t)buf;
28 	}
29 }
30 
31 static void
32 wait_for_admin_completion(struct status *s, struct spdk_nvme_ctrlr *ctrlr)
33 {
34 	/* Timeout if command does not complete within 1 second. */
35 	uint64_t timeout = spdk_get_ticks() + spdk_get_ticks_hz();
36 
37 	while (!s->done && spdk_get_ticks() < timeout) {
38 		spdk_nvme_ctrlr_process_admin_completions(ctrlr);
39 	}
40 
41 	if (!s->done) {
42 		CU_ASSERT(false && "completion timeout");
43 	}
44 }
45 
46 static void
47 wait_for_io_completion(struct status *s, struct spdk_nvme_qpair *qpair)
48 {
49 	/* Timeout if command does not complete within 1 second. */
50 	uint64_t timeout = spdk_get_ticks() + spdk_get_ticks_hz();
51 
52 	while (!s->done && spdk_get_ticks() < timeout) {
53 		spdk_nvme_qpair_process_completions(qpair, 0);
54 	}
55 
56 	if (!s->done) {
57 		CU_ASSERT(false && "completion timeout");
58 	}
59 }
60 
61 static void
62 test_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
63 {
64 	struct status *s = ctx;
65 
66 	s->done = true;
67 	s->cpl = *cpl;
68 }
69 
/* Test that target correctly handles various IDENTIFY CNS=1 requests. */
static void
admin_identify_ctrlr_verify_dptr(void)
{
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_cmd cmd;
	struct spdk_nvme_ctrlr_data *ctrlr_data;
	struct status s;
	int rc;

	/* Allocate data buffer with 4KiB alignment, since we need to test some
	 * very specific PRP cases.
	 */
	ctrlr_data = spdk_dma_zmalloc(sizeof(*ctrlr_data), 4096, NULL);
	SPDK_CU_ASSERT_FATAL(ctrlr_data != NULL);

	/* Connect using the transport ID supplied on the command line. */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_transport_id_parse(&g_trid, g_trid_str) == 0);
	ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
	SPDK_CU_ASSERT_FATAL(ctrlr);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_CTRLR;

	/* Test a properly formed IDENTIFY CNS=1 request. */
	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, ctrlr_data,
					   sizeof(*ctrlr_data), test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	/* The well-formed request must succeed. */
	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));

	/* Confirm the target fails an IDENTIFY CNS=1 request with incorrect
	 * DPTR lengths.
	 */
	s.done = false;

	/* Only specify 1KiB of data, and make sure it specifies a PRP offset
	 * that's 1KiB before the end of the buffer previously allocated.
	 *
	 * The controller needs to recognize that a full 4KiB of data was not
	 * specified in the PRPs, and should fail the command.
	 */
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd,
					   ((uint8_t *)ctrlr_data) + (4096 - 1024),
					   1024, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	/* The short-PRP request must be rejected. */
	CU_ASSERT(spdk_nvme_cpl_is_error(&s.cpl));

	spdk_dma_free(ctrlr_data);
	spdk_nvme_detach(ctrlr);
}
127 
128 /* Test that target correctly fails admin commands with fuse != 0 */
129 static void
130 admin_identify_ctrlr_verify_fused(void)
131 {
132 	struct spdk_nvme_ctrlr *ctrlr;
133 	struct spdk_nvme_cmd cmd;
134 	struct spdk_nvme_ctrlr_data *ctrlr_data;
135 	struct status s, s2;
136 	int rc;
137 
138 	ctrlr_data = spdk_dma_zmalloc(sizeof(*ctrlr_data), 0, NULL);
139 	SPDK_CU_ASSERT_FATAL(ctrlr_data != NULL);
140 
141 	SPDK_CU_ASSERT_FATAL(spdk_nvme_transport_id_parse(&g_trid, g_trid_str) == 0);
142 	ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
143 	SPDK_CU_ASSERT_FATAL(ctrlr);
144 
145 	/* The nvme driver waits until it sees both fused commands before submitting
146 	 * both to the queue - so construct two commands here and then check the
147 	 * both are completed with error status.
148 	 */
149 	memset(&cmd, 0, sizeof(cmd));
150 	cmd.opc = SPDK_NVME_OPC_IDENTIFY;
151 	cmd.fuse = 0x1;
152 	cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_CTRLR;
153 
154 	s.done = false;
155 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, ctrlr_data,
156 					   sizeof(*ctrlr_data), test_cb, &s);
157 	CU_ASSERT(rc == 0);
158 
159 	cmd.fuse = 0x2;
160 	s2.done = false;
161 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, ctrlr_data,
162 					   sizeof(*ctrlr_data), test_cb, &s2);
163 	CU_ASSERT(rc == 0);
164 
165 	wait_for_admin_completion(&s, ctrlr);
166 	wait_for_admin_completion(&s2, ctrlr);
167 
168 	CU_ASSERT(spdk_nvme_cpl_is_error(&s.cpl));
169 	CU_ASSERT(spdk_nvme_cpl_is_error(&s2.cpl));
170 
171 	spdk_nvme_detach(ctrlr);
172 	spdk_free(ctrlr_data);
173 }
174 
/* Test that target correctly handles requests to delete admin SQ/CQ (QID = 0).
 * Associated with issue #2172.
 */
static void
admin_delete_io_sq_use_admin_qid(void)
{
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_cmd cmd;
	struct status s;
	int rc;

	SPDK_CU_ASSERT_FATAL(spdk_nvme_transport_id_parse(&g_trid, g_trid_str) == 0);
	ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
	SPDK_CU_ASSERT_FATAL(ctrlr);

	/* Try deleting SQ for QID 0 (admin queue).  This is invalid. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_DELETE_IO_SQ;
	cmd.cdw10_bits.delete_io_q.qid = 0; /* admin queue */

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	/* Expect command-specific Invalid Queue Identifier status. */
	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);

	spdk_nvme_detach(ctrlr);
}
206 
/* Test that the target rejects a Delete I/O CQ command targeting the admin
 * queue (QID = 0).
 */
static void
admin_delete_io_cq_use_admin_qid(void)
{
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_cmd cmd;
	struct status s;
	int rc;

	SPDK_CU_ASSERT_FATAL(spdk_nvme_transport_id_parse(&g_trid, g_trid_str) == 0);
	ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
	SPDK_CU_ASSERT_FATAL(ctrlr);

	/* Try deleting CQ for QID 0 (admin queue).  This is invalid. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_DELETE_IO_CQ;
	cmd.cdw10_bits.delete_io_q.qid = 0; /* admin queue */

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	/* Expect command-specific Invalid Queue Identifier status. */
	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);

	spdk_nvme_detach(ctrlr);
}
235 
/* Verify an I/O SQ can be deleted exactly once: a working I/O queue is
 * exercised with a READ, its SQ is deleted (valid), deleted again (must fail
 * with Invalid Queue Identifier), then its CQ is deleted to clean up.
 */
static void
admin_delete_io_sq_delete_sq_twice(void)
{
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_qpair *qpair;
	struct spdk_nvme_ns *ns;
	uint32_t nsid;
	struct spdk_nvme_cmd cmd;
	struct status s;
	void *buf;
	uint32_t nlbas;
	int rc;

	SPDK_CU_ASSERT_FATAL(spdk_nvme_transport_id_parse(&g_trid, g_trid_str) == 0);
	ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
	SPDK_CU_ASSERT_FATAL(ctrlr);

	/* Create one I/O qpair; its SQ/CQ get QID 1. */
	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(qpair);

	nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
	SPDK_CU_ASSERT_FATAL(ns != NULL);

	/* READ command should execute successfully. */
	nlbas = 1;
	buf = spdk_dma_zmalloc(nlbas * spdk_nvme_ns_get_sector_size(ns), 0x1000,  NULL);
	SPDK_CU_ASSERT_FATAL(buf != NULL);
	s.done = false;
	rc = spdk_nvme_ns_cmd_read_with_md(ns, qpair, buf, NULL, 0, nlbas, test_cb, &s, 0, 0, 0);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	wait_for_io_completion(&s, qpair);

	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Delete SQ 1, this is valid. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_DELETE_IO_SQ;
	cmd.cdw10_bits.delete_io_q.qid = 1;

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Try deleting SQ 1 again, this is invalid. */
	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);

	/* Delete CQ 1, this is valid. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_DELETE_IO_CQ;
	cmd.cdw10_bits.delete_io_q.qid = 1;

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	spdk_dma_free(buf);
	spdk_nvme_detach(ctrlr);
}
316 
/* Verify Create I/O SQ validation of QSIZE and CQID: qsize 0 and
 * qsize > MQES must fail with Invalid Queue Size; CQID 0 and an
 * out-of-range CQID must fail with Invalid Queue Identifier; a valid
 * qsize/CQID combination must succeed.
 */
static void
admin_create_io_sq_verify_qsize_cqid(void)
{
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_qpair *qpair;
	union spdk_nvme_cap_register cap;
	struct spdk_nvme_ns *ns;
	uint32_t nsid, mqes;
	struct spdk_nvme_cmd cmd;
	struct status s;
	void *buf;
	uint32_t nlbas;
	uint64_t dma_addr;
	uint32_t ncqr;
	int rc;

	SPDK_CU_ASSERT_FATAL(spdk_nvme_transport_id_parse(&g_trid, g_trid_str) == 0);
	ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
	SPDK_CU_ASSERT_FATAL(ctrlr);

	/* Create one I/O qpair (QID 1) to start from a working state. */
	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(qpair);

	nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
	SPDK_CU_ASSERT_FATAL(ns != NULL);

	/* READ command should execute successfully. */
	nlbas = 1;
	buf = spdk_dma_zmalloc(nlbas * spdk_nvme_ns_get_sector_size(ns), 0x1000,  NULL);
	SPDK_CU_ASSERT_FATAL(buf != NULL);
	s.done = false;
	rc = spdk_nvme_ns_cmd_read_with_md(ns, qpair, buf, NULL, 0, nlbas, test_cb, &s, 0, 0, 0);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	wait_for_io_completion(&s, qpair);

	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	spdk_dma_free(buf);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	/* Get Maximum Number of CQs */
	cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_NUMBER_OF_QUEUES;

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);
	wait_for_admin_completion(&s, ctrlr);
	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));

	/* NCQR lives in the upper 16 bits of CDW0 and is 0-based, hence +1. */
	ncqr = ((s.cpl.cdw0 & 0xffff0000u) >> 16) + 1;

	/* Delete SQ 1, this is valid. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_DELETE_IO_SQ;
	cmd.cdw10_bits.delete_io_q.qid = 1;

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* CAP.MQES is 0-based; allocate a backing buffer big enough for the
	 * largest possible SQ.
	 */
	cap = spdk_nvme_ctrlr_get_regs_cap(ctrlr);
	mqes = cap.bits.mqes + 1;
	buf = spdk_dma_zmalloc(mqes * sizeof(cmd), 0x1000,  NULL);
	SPDK_CU_ASSERT_FATAL(buf != NULL);

	/* Create SQ 1 again, qsize is 0, this is invalid. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_CREATE_IO_SQ;
	cmd.cdw10_bits.create_io_q.qid = 1;
	cmd.cdw10_bits.create_io_q.qsize = 0; /* 0 based value */
	cmd.cdw11_bits.create_io_sq.pc = 1;
	cmd.cdw11_bits.create_io_sq.cqid = 1;
	dma_addr = nvme_vtophys(&g_trid, buf, NULL);
	SPDK_CU_ASSERT_FATAL(dma_addr != SPDK_VTOPHYS_ERROR);
	cmd.dptr.prp.prp1 = dma_addr;

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_SIZE);

	/* Create SQ 1 again, qsize is MQES + 1, this is invalid. */
	cmd.cdw10_bits.create_io_q.qsize = (uint16_t)mqes; /* 0 based value */
	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_SIZE);

	/* Create SQ 1 again, CQID is 0, this is invalid. */
	cmd.cdw10_bits.create_io_q.qsize = cap.bits.mqes; /* 0 based value, valid */
	cmd.cdw11_bits.create_io_sq.cqid = 0;
	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);

	/* Create SQ 1 again, CQID is NCQR + 1, this is invalid. */
	cmd.cdw11_bits.create_io_sq.cqid = ncqr + 1;
	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);

	/* Create SQ 1 again, CQID is 1, this is valid. */
	s.done = false;
	cmd.cdw11_bits.create_io_sq.cqid = 1;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	spdk_dma_free(buf);
	spdk_nvme_detach(ctrlr);
}
460 
/* Verify Create I/O SQ handling of the Physically Contiguous (PC) bit when
 * CAP.CQR = 1: PC = 0 must fail with Invalid Field, PC = 1 must succeed.
 * Skipped entirely when the controller does not require contiguous queues.
 */
static void
admin_create_io_sq_verify_pc(void)
{
	struct spdk_nvme_ctrlr *ctrlr;
	union spdk_nvme_cap_register cap;
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_qpair *qpair;
	struct spdk_nvme_cmd cmd;
	struct status s;
	void *buf;
	uint64_t dma_addr;
	int rc;

	SPDK_CU_ASSERT_FATAL(spdk_nvme_transport_id_parse(&g_trid, g_trid_str) == 0);
	ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
	SPDK_CU_ASSERT_FATAL(ctrlr);

	cap = spdk_nvme_ctrlr_get_regs_cap(ctrlr);
	/* exit the rest of test case if CAP.CQR is 0 */
	if (!cap.bits.cqr) {
		spdk_nvme_detach(ctrlr);
		return;
	}

	/* Create an I/O qpair (QID 1) so there is an SQ to delete/recreate. */
	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(qpair);

	/* Delete SQ 1 first, this is valid. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_DELETE_IO_SQ;
	cmd.cdw10_bits.delete_io_q.qid = 1;

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);
	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));

	/* CAP.MQES is 0-based; size the queue buffer for the maximum depth. */
	buf = spdk_dma_zmalloc((cap.bits.mqes + 1) * sizeof(cmd), 0x1000,  NULL);
	SPDK_CU_ASSERT_FATAL(buf != NULL);

	/* Create SQ 1, PC is 0, this is invalid */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_CREATE_IO_SQ;
	cmd.cdw10_bits.create_io_q.qid = 1;
	cmd.cdw10_bits.create_io_q.qsize = cap.bits.mqes;
	cmd.cdw11_bits.create_io_sq.pc = 0;
	cmd.cdw11_bits.create_io_sq.cqid = 1;
	dma_addr = nvme_vtophys(&g_trid, buf, NULL);
	SPDK_CU_ASSERT_FATAL(dma_addr != SPDK_VTOPHYS_ERROR);
	cmd.dptr.prp.prp1 = dma_addr;

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* Create SQ 1 again, PC is 1, this is valid. */
	s.done = false;
	cmd.cdw11_bits.create_io_sq.pc = 1;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);
	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));

	spdk_dma_free(buf);
	spdk_nvme_detach(ctrlr);
}
534 
/* Verify queue deletion ordering: deleting a CQ while its SQ still exists
 * must fail with Invalid Queue Deletion; after the SQ is deleted, deleting
 * the CQ must succeed.
 */
static void
admin_delete_io_cq_delete_cq_first(void)
{
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_qpair *qpair;
	struct spdk_nvme_ns *ns;
	uint32_t nsid;
	struct spdk_nvme_cmd cmd;
	struct status s;
	void *buf;
	uint32_t nlbas;
	int rc;

	SPDK_CU_ASSERT_FATAL(spdk_nvme_transport_id_parse(&g_trid, g_trid_str) == 0);
	ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
	SPDK_CU_ASSERT_FATAL(ctrlr);

	/* Create one I/O qpair; its SQ/CQ get QID 1. */
	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(qpair);

	nsid = spdk_nvme_ctrlr_get_first_active_ns(ctrlr);
	ns = spdk_nvme_ctrlr_get_ns(ctrlr, nsid);
	SPDK_CU_ASSERT_FATAL(ns != NULL);

	/* READ command should execute successfully. */
	nlbas = 1;
	buf = spdk_dma_zmalloc(nlbas * spdk_nvme_ns_get_sector_size(ns), 0x1000,  NULL);
	SPDK_CU_ASSERT_FATAL(buf != NULL);
	s.done = false;
	rc = spdk_nvme_ns_cmd_read_with_md(ns, qpair, buf, NULL, 0, nlbas, test_cb, &s, 0, 0, 0);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	wait_for_io_completion(&s, qpair);

	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Delete CQ 1, this is invalid. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_DELETE_IO_CQ;
	cmd.cdw10_bits.delete_io_q.qid = 1;

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_DELETION);

	/* Delete SQ 1, this is valid. */
	s.done = false;
	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_DELETE_IO_SQ;
	cmd.cdw10_bits.delete_io_q.qid = 1;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Delete CQ 1 again, this is valid */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_DELETE_IO_CQ;
	cmd.cdw10_bits.delete_io_q.qid = 1;

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	spdk_dma_free(buf);
	spdk_nvme_detach(ctrlr);
}
618 
/* Verify Create I/O CQ validation of the Interrupt Vector (IV) and
 * Physically Contiguous (PC) fields: an out-of-range IV must fail with
 * Invalid Interrupt Vector; when CAP.CQR = 1, PC = 0 must fail with
 * Invalid Field and PC = 1 must succeed.
 */
static void
admin_create_io_cq_verify_iv_pc(void)
{
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_cmd cmd;
	union spdk_nvme_cap_register cap;
	uint32_t mqes;
	uint64_t dma_addr;
	struct status s;
	void *buf;
	int rc;

	SPDK_CU_ASSERT_FATAL(spdk_nvme_transport_id_parse(&g_trid, g_trid_str) == 0);
	ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
	SPDK_CU_ASSERT_FATAL(ctrlr);

	/* CAP.MQES is 0-based; size the CQ buffer for the maximum depth. */
	cap = spdk_nvme_ctrlr_get_regs_cap(ctrlr);
	mqes = cap.bits.mqes + 1;
	buf = spdk_dma_zmalloc(mqes * sizeof(struct spdk_nvme_cpl), 0x1000,  NULL);
	SPDK_CU_ASSERT_FATAL(buf != NULL);
	dma_addr = nvme_vtophys(&g_trid, buf, NULL);
	SPDK_CU_ASSERT_FATAL(dma_addr != SPDK_VTOPHYS_ERROR);

	/* Create CQ 1, IV is 2048, this is invalid */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_CREATE_IO_CQ;
	cmd.cdw10_bits.create_io_q.qid = 1;
	cmd.cdw10_bits.create_io_q.qsize = mqes - 1; /* 0 based value */
	cmd.cdw11_bits.create_io_cq.pc = 1;
	cmd.cdw11_bits.create_io_cq.ien = 1;
	cmd.cdw11_bits.create_io_cq.iv = 2048;
	cmd.dptr.prp.prp1 = dma_addr;

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);
	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_INVALID_INTERRUPT_VECTOR);

	/* exit the rest of test case if CAP.CQR is 0 */
	if (!cap.bits.cqr) {
		goto out;
	}

	/* Create CQ 1, PC is 0, this is invalid */
	cmd.cdw11_bits.create_io_cq.pc = 0;
	cmd.cdw11_bits.create_io_cq.iv = 1;
	cmd.dptr.prp.prp1 = dma_addr;

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* PC is 1, this is valid */
	cmd.cdw11_bits.create_io_cq.pc = 1;
	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);
	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));

	/* Delete CQ 1, this is valid. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_DELETE_IO_CQ;
	cmd.cdw10_bits.delete_io_q.qid = 1;

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);
	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));

out:
	spdk_dma_free(buf);
	spdk_nvme_detach(ctrlr);
}
702 
/* Issue a Fabrics Property Get for offset 0 (CAP) and check that fabrics
 * controllers accept it while non-fabrics controllers reject the fabrics
 * opcode with Invalid Opcode.
 */
static void
fabric_property_get(void)
{
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvmf_fabric_prop_set_cmd cmd;
	struct status s;
	int rc;

	SPDK_CU_ASSERT_FATAL(spdk_nvme_transport_id_parse(&g_trid, g_trid_str) == 0);
	ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
	SPDK_CU_ASSERT_FATAL(ctrlr);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SPDK_NVME_OPC_FABRIC;
	cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
	cmd.ofst = 0; /* CAP */
	cmd.attrib.size = SPDK_NVMF_PROP_SIZE_8;

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, (struct spdk_nvme_cmd *)&cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	/* Non-fabrics controllers should fail an SPDK_NVME_OPC_FABRIC. */
	if (spdk_nvme_ctrlr_is_fabrics(ctrlr)) {
		CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);
	} else {
		CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
	}

	spdk_nvme_detach(ctrlr);
}
736 
/* Verify Set Features / Number of Queues error handling: requesting 65535
 * (a reserved value) must fail with Invalid Field, and issuing the command
 * after an I/O queue exists must fail with Command Sequence Error.
 */
static void
admin_set_features_number_of_queues(void)
{
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_qpair *qpair;
	struct spdk_nvme_cmd cmd;
	struct status s;
	int rc;

	SPDK_CU_ASSERT_FATAL(spdk_nvme_transport_id_parse(&g_trid, g_trid_str) == 0);
	ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
	SPDK_CU_ASSERT_FATAL(ctrlr);

	/* NCQR and NSQR are 65535, invalid */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_NUMBER_OF_QUEUES;
	cmd.cdw11_bits.feat_num_of_queues.bits.ncqr = UINT16_MAX;
	cmd.cdw11_bits.feat_num_of_queues.bits.nsqr = UINT16_MAX;

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(qpair);

	/* After the IO queue is created, invalid */
	cmd.cdw11_bits.feat_num_of_queues.bits.ncqr = 128;
	cmd.cdw11_bits.feat_num_of_queues.bits.nsqr = 128;

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);

	/* qpair is not explicitly freed; presumably released on detach --
	 * NOTE(review): confirm against spdk_nvme_detach() semantics.
	 */
	spdk_nvme_detach(ctrlr);
}
784 
785 /* Test the mandatory features with Get Features command:
786  * 01h Arbitration.
787  * 02h Power Management.
788  * 04h Temperature Threshold.
789  * 05h Error Recovery.
790  * 07h Number of Queues.
791  * 08h Interrupt Coalescing.
792  * 09h Interrupt Vector Configuration.
793  * 0Ah Write Atomicity Normal.
794  * 0Bh Asynchronous Event Configuration.
795  */
796 static void
797 admin_get_features_mandatory_features(void)
798 {
799 	struct spdk_nvme_ctrlr *ctrlr;
800 	struct spdk_nvme_cmd cmd;
801 	struct status s;
802 	int rc;
803 
804 	SPDK_CU_ASSERT_FATAL(spdk_nvme_transport_id_parse(&g_trid, g_trid_str) == 0);
805 	ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
806 	SPDK_CU_ASSERT_FATAL(ctrlr);
807 
808 	/* Arbitration */
809 	memset(&cmd, 0, sizeof(cmd));
810 	cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
811 	cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ARBITRATION;
812 
813 	s.done = false;
814 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
815 	CU_ASSERT(rc == 0);
816 
817 	wait_for_admin_completion(&s, ctrlr);
818 
819 	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
820 	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);
821 
822 	/* Power Management */
823 	cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_POWER_MANAGEMENT;
824 
825 	s.done = false;
826 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
827 	CU_ASSERT(rc == 0);
828 
829 	wait_for_admin_completion(&s, ctrlr);
830 
831 	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
832 	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);
833 
834 	/* Temperature Threshold */
835 	cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
836 
837 	s.done = false;
838 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
839 	CU_ASSERT(rc == 0);
840 
841 	wait_for_admin_completion(&s, ctrlr);
842 
843 	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
844 	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);
845 
846 	/* Error Recovery */
847 	cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
848 
849 	s.done = false;
850 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
851 	CU_ASSERT(rc == 0);
852 
853 	wait_for_admin_completion(&s, ctrlr);
854 
855 	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
856 	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);
857 
858 	/* Number of Queues */
859 	cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_NUMBER_OF_QUEUES;
860 
861 	s.done = false;
862 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
863 	CU_ASSERT(rc == 0);
864 
865 	wait_for_admin_completion(&s, ctrlr);
866 
867 	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
868 	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);
869 
870 	/* Interrupt Coalescing */
871 	cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_INTERRUPT_COALESCING;
872 
873 	s.done = false;
874 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
875 	CU_ASSERT(rc == 0);
876 
877 	wait_for_admin_completion(&s, ctrlr);
878 
879 	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
880 	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);
881 
882 	/* Interrupt Vector Configuration */
883 	cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION;
884 
885 	s.done = false;
886 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
887 	CU_ASSERT(rc == 0);
888 
889 	wait_for_admin_completion(&s, ctrlr);
890 
891 	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
892 	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);
893 
894 	/* Write Atomicity Normal */
895 	cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_WRITE_ATOMICITY;
896 
897 	s.done = false;
898 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
899 	CU_ASSERT(rc == 0);
900 
901 	wait_for_admin_completion(&s, ctrlr);
902 
903 	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
904 	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);
905 
906 	/* Asynchronous Event Configuration */
907 	cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION;
908 
909 	s.done = false;
910 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
911 	CU_ASSERT(rc == 0);
912 
913 	wait_for_admin_completion(&s, ctrlr);
914 
915 	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
916 	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);
917 
918 	spdk_nvme_detach(ctrlr);
919 }
920 
/* Test the optional features with Get Features command:
 * 0Fh Keep Alive Timer.
 * 16h Host Behavior Support.
 */
static void
admin_get_features_optional_features(void)
{
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_cmd cmd;
	struct status s;
	void *buf;
	int rc;

	SPDK_CU_ASSERT_FATAL(spdk_nvme_transport_id_parse(&g_trid, g_trid_str) == 0);
	ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
	SPDK_CU_ASSERT_FATAL(ctrlr);

	/* Keep Alive Timer */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_KEEP_ALIVE_TIMER;

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	/* Host Behavior Support returns a data structure, so this one needs a
	 * DMA-able buffer.
	 */
	buf = spdk_dma_zmalloc(sizeof(struct spdk_nvme_host_behavior), 0x1000,  NULL);
	SPDK_CU_ASSERT_FATAL(buf != NULL);

	cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT;

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, buf, sizeof(struct spdk_nvme_host_behavior),
					   test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);

	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_SUCCESS);

	spdk_dma_free(buf);
	spdk_nvme_detach(ctrlr);
}
971 
972 static void
973 admin_create_io_qp_max_qps(void)
974 {
975 	struct spdk_nvme_ctrlr *ctrlr;
976 	struct spdk_nvme_cmd cmd;
977 	struct spdk_nvme_io_qpair_opts opts;
978 	struct spdk_nvme_qpair *qpair;
979 	struct status s;
980 	uint32_t ncqr, nsqr, i, num_of_queues;
981 	int rc;
982 
983 	SPDK_CU_ASSERT_FATAL(spdk_nvme_transport_id_parse(&g_trid, g_trid_str) == 0);
984 	ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
985 	SPDK_CU_ASSERT_FATAL(ctrlr);
986 
987 	memset(&cmd, 0, sizeof(cmd));
988 	cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
989 	/* Number of Queues */
990 	cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_NUMBER_OF_QUEUES;
991 
992 	s.done = false;
993 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
994 	CU_ASSERT(rc == 0);
995 	wait_for_admin_completion(&s, ctrlr);
996 	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
997 
998 	nsqr = s.cpl.cdw0 & 0xffffu;
999 	ncqr = (s.cpl.cdw0 & 0xffff0000u) >> 16;
1000 
1001 	num_of_queues = spdk_min(nsqr, ncqr) + 1;
1002 
1003 	spdk_nvme_ctrlr_get_default_io_qpair_opts(ctrlr, &opts, sizeof(opts));
1004 	/* choose a small value to save memory */
1005 	opts.io_queue_size = 2;
1006 
1007 	/* create all the IO queue pairs, valid */
1008 	for (i = 0; i < num_of_queues; i++) {
1009 		qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
1010 		CU_ASSERT(qpair != NULL);
1011 	}
1012 
1013 	/* create one more, invalid */
1014 	qpair = spdk_nvme_ctrlr_alloc_io_qpair(ctrlr, &opts, sizeof(opts));
1015 	CU_ASSERT(qpair == NULL);
1016 
1017 	spdk_nvme_detach(ctrlr);
1018 }
1019 
/* Verify IDENTIFY with CNS=00h (Identify Namespace) for the interesting NSID
 * classes: 0 (invalid), 0xffffffff (broadcast, validity depends on OACS NS
 * Management support), an active NSID, and an inactive NSID (must return a
 * zero-filled data structure).
 */
static void
admin_identify_ns(void)
{
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_cmd cmd;
	const struct spdk_nvme_ctrlr_data *cdata;
	struct spdk_nvme_ns_data *ns_data;
	struct spdk_nvme_ns *ns;
	uint32_t i, active_nsid, inactive_nsid;
	uint32_t nows, npwg, max_xfer_size;
	struct status s;
	int rc;

	SPDK_CU_ASSERT_FATAL(spdk_nvme_transport_id_parse(&g_trid, g_trid_str) == 0);
	ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
	SPDK_CU_ASSERT_FATAL(ctrlr);

	cdata = spdk_nvme_ctrlr_get_data(ctrlr);
	/* Find active NSID and inactive NSID if exist */
	active_nsid = inactive_nsid = 0;
	for (i = 1; i <= cdata->nn; i++) {
		if (spdk_nvme_ctrlr_is_active_ns(ctrlr, i)) {
			active_nsid = i;
		} else {
			inactive_nsid = i;
		}

		/* Stop as soon as one example of each has been found. */
		if (active_nsid && inactive_nsid) {
			break;
		}
	}

	/* Page-aligned DMA buffer for the Identify Namespace data structure. */
	ns_data = spdk_dma_zmalloc(sizeof(*ns_data), 0x1000, NULL);
	SPDK_CU_ASSERT_FATAL(ns_data != NULL);

	/* NSID is 0, invalid */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_IDENTIFY;
	cmd.nsid = 0;
	cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS;

	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, ns_data,
					   sizeof(*ns_data), test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);
	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

	/* NSID is 0xffffffff, up to OACS can support NS MANAGE or not */
	cmd.nsid = 0xffffffff;
	s.done = false;
	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, ns_data,
					   sizeof(*ns_data), test_cb, &s);
	CU_ASSERT(rc == 0);

	wait_for_admin_completion(&s, ctrlr);
	if (!cdata->oacs.ns_manage) {
		CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
	} else {
		CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
	}

	/* NSID is active, valid */
	if (active_nsid) {
		cmd.nsid = active_nsid;
		s.done = false;
		rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, ns_data,
						   sizeof(*ns_data), test_cb, &s);
		CU_ASSERT(rc == 0);

		wait_for_admin_completion(&s, ctrlr);
		CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));

		max_xfer_size = spdk_nvme_ctrlr_get_max_xfer_size(ctrlr);
		ns = spdk_nvme_ctrlr_get_ns(ctrlr, active_nsid);
		SPDK_CU_ASSERT_FATAL(ns != NULL);

		/* If the namespace reports optimal performance attributes,
		 * sanity check them: NPWG and NOWS are 0 based values, the
		 * preferred/optimal sizes may not exceed MDTS, and the
		 * optimal write size must be a multiple of the preferred
		 * write granularity.
		 */
		if (ns_data->nsfeat.optperf) {
			npwg = ns_data->npwg + 1;
			nows = ns_data->nows + 1;

			CU_ASSERT(npwg * spdk_nvme_ns_get_sector_size(ns) <= max_xfer_size);
			CU_ASSERT(nows * spdk_nvme_ns_get_sector_size(ns) <= max_xfer_size);
			CU_ASSERT(nows % npwg == 0);
		}
	}

	/* NSID is inactive, valid and should contain zeroed data */
	if (inactive_nsid) {
		/* Poison the buffer first so all-zero output proves the
		 * controller actually wrote it.
		 */
		memset(ns_data, 0x5A, sizeof(*ns_data));
		cmd.nsid = inactive_nsid;
		s.done = false;
		rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, ns_data,
						   sizeof(*ns_data), test_cb, &s);
		CU_ASSERT(rc == 0);

		wait_for_admin_completion(&s, ctrlr);
		CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
		CU_ASSERT(spdk_mem_all_zero(ns_data, sizeof(*ns_data)));
	}

	spdk_dma_free(ns_data);
	spdk_nvme_detach(ctrlr);
}
1125 
1126 /* Mandatory Log Page Identifiers
1127  * 01h Error Information
1128  * 02h SMART / Health Information
1129  * 03h Firmware Slot Information
1130  */
1131 static void
1132 admin_get_log_page_mandatory_logs(void)
1133 {
1134 	struct spdk_nvme_ctrlr *ctrlr;
1135 	struct spdk_nvme_cmd cmd;
1136 	void *buf;
1137 	struct status s;
1138 	int rc;
1139 
1140 	SPDK_CU_ASSERT_FATAL(spdk_nvme_transport_id_parse(&g_trid, g_trid_str) == 0);
1141 	ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
1142 	SPDK_CU_ASSERT_FATAL(ctrlr);
1143 
1144 	buf = spdk_dma_zmalloc(0x1000, 0x1000, NULL);
1145 	SPDK_CU_ASSERT_FATAL(buf != NULL);
1146 
1147 	/* 01h Error Information, valid */
1148 	memset(&cmd, 0, sizeof(cmd));
1149 	cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
1150 	cmd.cdw10_bits.get_log_page.numdl = sizeof(struct spdk_nvme_error_information_entry) / 4 - 1;
1151 	cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
1152 
1153 	s.done = false;
1154 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, buf,
1155 					   sizeof(struct spdk_nvme_error_information_entry), test_cb, &s);
1156 	CU_ASSERT(rc == 0);
1157 
1158 	wait_for_admin_completion(&s, ctrlr);
1159 	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
1160 
1161 	/* 02h SMART / Health Information, valid */
1162 	cmd.cdw10_bits.get_log_page.numdl = sizeof(struct spdk_nvme_health_information_page) / 4 - 1;
1163 	cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_HEALTH_INFORMATION;
1164 
1165 	s.done = false;
1166 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, buf,
1167 					   sizeof(struct spdk_nvme_health_information_page), test_cb, &s);
1168 	CU_ASSERT(rc == 0);
1169 
1170 	wait_for_admin_completion(&s, ctrlr);
1171 	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
1172 
1173 	/* 03h Firmware Slot Information, valid */
1174 	cmd.cdw10_bits.get_log_page.numdl = sizeof(struct spdk_nvme_firmware_page) / 4 - 1;
1175 	cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_FIRMWARE_SLOT;
1176 
1177 	s.done = false;
1178 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, buf,
1179 					   sizeof(struct spdk_nvme_firmware_page), test_cb, &s);
1180 	CU_ASSERT(rc == 0);
1181 
1182 	wait_for_admin_completion(&s, ctrlr);
1183 	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
1184 
1185 	spdk_dma_free(buf);
1186 	spdk_nvme_detach(ctrlr);
1187 }
1188 
1189 static void
1190 admin_get_log_page_with_lpo(void)
1191 {
1192 	struct spdk_nvme_ctrlr *ctrlr;
1193 	struct spdk_nvme_cmd cmd;
1194 	void *buf;
1195 	struct status s;
1196 	int rc;
1197 
1198 	SPDK_CU_ASSERT_FATAL(spdk_nvme_transport_id_parse(&g_trid, g_trid_str) == 0);
1199 	ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
1200 	SPDK_CU_ASSERT_FATAL(ctrlr);
1201 
1202 	buf = spdk_dma_zmalloc(0x1000, 0x1000, NULL);
1203 	SPDK_CU_ASSERT_FATAL(buf != NULL);
1204 
1205 	/* 03h Firmware Slot Information, valid */
1206 	memset(&cmd, 0, sizeof(cmd));
1207 	cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
1208 	cmd.cdw10_bits.get_log_page.numdl = sizeof(struct spdk_nvme_firmware_page) / 4 - 1;
1209 	cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_FIRMWARE_SLOT;
1210 
1211 	s.done = false;
1212 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, buf,
1213 					   sizeof(struct spdk_nvme_firmware_page), test_cb, &s);
1214 	CU_ASSERT(rc == 0);
1215 
1216 	wait_for_admin_completion(&s, ctrlr);
1217 	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
1218 
1219 	/* Log Page Offset Lower is greater than spdk_nvme_firmware_page, invalid */
1220 	cmd.cdw12 = sizeof(struct spdk_nvme_firmware_page) + 4;
1221 	s.done = false;
1222 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, buf,
1223 					   sizeof(struct spdk_nvme_firmware_page), test_cb, &s);
1224 	CU_ASSERT(rc == 0);
1225 
1226 	wait_for_admin_completion(&s, ctrlr);
1227 	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
1228 
1229 	/* Log Page Offset Lower is less than spdk_nvme_firmware_page, but greater than 0, valid */
1230 	cmd.cdw12 = 4;
1231 	s.done = false;
1232 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, buf,
1233 					   sizeof(struct spdk_nvme_firmware_page), test_cb, &s);
1234 	CU_ASSERT(rc == 0);
1235 
1236 	wait_for_admin_completion(&s, ctrlr);
1237 	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
1238 
1239 	spdk_dma_free(buf);
1240 	spdk_nvme_detach(ctrlr);
1241 }
1242 
1243 static void
1244 admin_create_io_sq_shared_cq(void)
1245 {
1246 	struct spdk_nvme_ctrlr *ctrlr;
1247 	struct spdk_nvme_cmd cmd;
1248 	void *buf;
1249 	uint64_t dma_addr;
1250 	struct status s;
1251 	int rc;
1252 
1253 	SPDK_CU_ASSERT_FATAL(spdk_nvme_transport_id_parse(&g_trid, g_trid_str) == 0);
1254 	ctrlr = spdk_nvme_connect(&g_trid, NULL, 0);
1255 	SPDK_CU_ASSERT_FATAL(ctrlr);
1256 
1257 	/* we will create 4 SQs and 2 CQs, each queue will use 1 page */
1258 	buf = spdk_dma_zmalloc(0x6000, 0x1000, NULL);
1259 	SPDK_CU_ASSERT_FATAL(buf != NULL);
1260 	dma_addr = nvme_vtophys(&g_trid, buf, NULL);
1261 	SPDK_CU_ASSERT_FATAL(dma_addr != SPDK_VTOPHYS_ERROR);
1262 
1263 	memset(&cmd, 0, sizeof(cmd));
1264 	cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
1265 	/* Number of Queues */
1266 	cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_NUMBER_OF_QUEUES;
1267 	cmd.cdw11_bits.feat_num_of_queues.bits.ncqr = 1; /* 0 based value */
1268 	cmd.cdw11_bits.feat_num_of_queues.bits.nsqr = 3; /* 0 based value */
1269 
1270 	s.done = false;
1271 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
1272 	CU_ASSERT(rc == 0);
1273 	wait_for_admin_completion(&s, ctrlr);
1274 	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
1275 
1276 	/* Create CQ 1, this is valid */
1277 	memset(&cmd, 0, sizeof(cmd));
1278 	cmd.opc = SPDK_NVME_OPC_CREATE_IO_CQ;
1279 	cmd.cdw10_bits.create_io_q.qid = 1;
1280 	cmd.cdw10_bits.create_io_q.qsize = 7; /* 0 based value */
1281 	cmd.cdw11_bits.create_io_cq.pc = 1;
1282 	cmd.cdw11_bits.create_io_cq.ien = 1;
1283 	cmd.cdw11_bits.create_io_cq.iv = 1;
1284 	cmd.dptr.prp.prp1 = dma_addr;
1285 
1286 	s.done = false;
1287 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
1288 	CU_ASSERT(rc == 0);
1289 	wait_for_admin_completion(&s, ctrlr);
1290 	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
1291 
1292 	/* Create CQ 2, this is valid */
1293 	cmd.cdw10_bits.create_io_q.qid = 2;
1294 	cmd.cdw11_bits.create_io_cq.iv = 2;
1295 	cmd.dptr.prp.prp1 = dma_addr + 0x1000;
1296 
1297 	s.done = false;
1298 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
1299 	CU_ASSERT(rc == 0);
1300 	wait_for_admin_completion(&s, ctrlr);
1301 	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
1302 
1303 	/* Create SQ 1, CQID 2, this is valid */
1304 	memset(&cmd, 0, sizeof(cmd));
1305 	cmd.opc = SPDK_NVME_OPC_CREATE_IO_SQ;
1306 	cmd.cdw10_bits.create_io_q.qid = 1;
1307 	cmd.cdw10_bits.create_io_q.qsize = 7; /* 0 based value */
1308 	cmd.cdw11_bits.create_io_sq.pc = 1;
1309 	cmd.cdw11_bits.create_io_sq.cqid = 2;
1310 	cmd.dptr.prp.prp1 = dma_addr + 0x2000;
1311 
1312 	s.done = false;
1313 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
1314 	CU_ASSERT(rc == 0);
1315 	wait_for_admin_completion(&s, ctrlr);
1316 	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
1317 
1318 	/* Create SQ 2, CQID 2, this is valid */
1319 	cmd.cdw10_bits.create_io_q.qid = 2;
1320 	cmd.dptr.prp.prp1 = dma_addr + 0x3000;
1321 
1322 	s.done = false;
1323 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
1324 	CU_ASSERT(rc == 0);
1325 	wait_for_admin_completion(&s, ctrlr);
1326 	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
1327 
1328 	/* Create SQ 3, CQID 2, this is valid */
1329 	cmd.cdw10_bits.create_io_q.qid = 3;
1330 	cmd.dptr.prp.prp1 = dma_addr + 0x4000;
1331 
1332 	s.done = false;
1333 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
1334 	CU_ASSERT(rc == 0);
1335 	wait_for_admin_completion(&s, ctrlr);
1336 	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
1337 
1338 	/* Create SQ 4, CQID 1, this is valid */
1339 	cmd.cdw10_bits.create_io_q.qid = 4;
1340 	cmd.cdw11_bits.create_io_sq.cqid = 1;
1341 	cmd.dptr.prp.prp1 = dma_addr + 0x5000;
1342 
1343 	s.done = false;
1344 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
1345 	CU_ASSERT(rc == 0);
1346 	wait_for_admin_completion(&s, ctrlr);
1347 	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
1348 
1349 	/* Delete SQ 1, this is valid */
1350 	memset(&cmd, 0, sizeof(cmd));
1351 	cmd.opc = SPDK_NVME_OPC_DELETE_IO_SQ;
1352 	cmd.cdw10_bits.delete_io_q.qid = 1;
1353 
1354 	s.done = false;
1355 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
1356 	CU_ASSERT(rc == 0);
1357 	wait_for_admin_completion(&s, ctrlr);
1358 	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
1359 
1360 	/* Delete SQ 2, this is valid */
1361 	cmd.cdw10_bits.delete_io_q.qid = 2;
1362 
1363 	s.done = false;
1364 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
1365 	CU_ASSERT(rc == 0);
1366 	wait_for_admin_completion(&s, ctrlr);
1367 	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
1368 
1369 	/* Delete CQ 2, this is invalid */
1370 	memset(&cmd, 0, sizeof(cmd));
1371 	cmd.opc = SPDK_NVME_OPC_DELETE_IO_CQ;
1372 	cmd.cdw10_bits.delete_io_q.qid = 2;
1373 
1374 	s.done = false;
1375 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
1376 	CU_ASSERT(rc == 0);
1377 	wait_for_admin_completion(&s, ctrlr);
1378 	CU_ASSERT(s.cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
1379 	CU_ASSERT(s.cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_DELETION);
1380 
1381 	/* Delete SQ 3, this is valid */
1382 	memset(&cmd, 0, sizeof(cmd));
1383 	cmd.opc = SPDK_NVME_OPC_DELETE_IO_SQ;
1384 	cmd.cdw10_bits.delete_io_q.qid = 3;
1385 
1386 	s.done = false;
1387 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
1388 	CU_ASSERT(rc == 0);
1389 	wait_for_admin_completion(&s, ctrlr);
1390 	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
1391 
1392 	/* Delete SQ 4, this is valid */
1393 	memset(&cmd, 0, sizeof(cmd));
1394 	cmd.opc = SPDK_NVME_OPC_DELETE_IO_SQ;
1395 	cmd.cdw10_bits.delete_io_q.qid = 4;
1396 
1397 	s.done = false;
1398 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
1399 	CU_ASSERT(rc == 0);
1400 	wait_for_admin_completion(&s, ctrlr);
1401 	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
1402 
1403 	/* Delete CQ 2, this is valid */
1404 	memset(&cmd, 0, sizeof(cmd));
1405 	cmd.opc = SPDK_NVME_OPC_DELETE_IO_CQ;
1406 	cmd.cdw10_bits.delete_io_q.qid = 2;
1407 
1408 	s.done = false;
1409 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
1410 	CU_ASSERT(rc == 0);
1411 	wait_for_admin_completion(&s, ctrlr);
1412 	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
1413 
1414 	/* Delete CQ 1, this is valid */
1415 	cmd.cdw10_bits.delete_io_q.qid = 1;
1416 
1417 	s.done = false;
1418 	rc = spdk_nvme_ctrlr_cmd_admin_raw(ctrlr, &cmd, NULL, 0, test_cb, &s);
1419 	CU_ASSERT(rc == 0);
1420 	wait_for_admin_completion(&s, ctrlr);
1421 	CU_ASSERT(!spdk_nvme_cpl_is_error(&s.cpl));
1422 
1423 	spdk_dma_free(buf);
1424 	spdk_nvme_detach(ctrlr);
1425 }
1426 
/* Long-option table consumed by spdk_ut_run_tests(); each long name is tied
 * to its short option character via the OPTION_* macros, which are also used
 * in parse_arg().
 * NOTE(review): there is no all-zero terminator entry — this assumes the
 * consumer relies on the explicit optlen element count rather than
 * getopt_long()'s sentinel convention; verify against spdk_ut_run_tests().
 */
static struct option g_options[] = {
#define OPTION_TRID 'r'
	{"trid", required_argument, NULL, OPTION_TRID},
#define OPTION_SINGLE_FILE_SEGMENTS 'g'
	{"single-file-segments", no_argument, NULL, OPTION_SINGLE_FILE_SEGMENTS},
};
1433 
1434 static int
1435 parse_arg(int op, const char *optarg, void *cb_arg)
1436 {
1437 	struct spdk_env_opts *opts = cb_arg;
1438 
1439 	switch (op) {
1440 	case 'g':
1441 		opts->hugepage_single_segments = true;
1442 		break;
1443 	case 'r':
1444 		g_trid_str = optarg;
1445 		break;
1446 	default:
1447 		SPDK_ERRLOG("Unknown op '%c'\n", op);
1448 		return -1;
1449 	}
1450 
1451 	return 0;
1452 }
1453 
1454 static int
1455 init(void *cb_arg)
1456 {
1457 	struct spdk_env_opts *opts = cb_arg;
1458 	int rc;
1459 
1460 	if (g_trid_str == NULL) {
1461 		fprintf(stderr, "-t <trid> not specified\n");
1462 		return -EINVAL;
1463 	}
1464 
1465 	rc = spdk_env_init(opts);
1466 	if (rc != 0) {
1467 		fprintf(stderr, "could not spdk_env_init\n");
1468 		return rc;
1469 	}
1470 
1471 	return 0;
1472 }
1473 
/* Print the tool-specific command line options for the harness usage text. */
static void
usage(void *cb_arg)
{
	(void)cb_arg; /* unused */

	fputs("  -r, --trid                       transport ID\n", stdout);
	fputs("  -g, --single-file-segments       force creating just one hugetlbfs file\n", stdout);
}
1480 
/* Entry point: registers the compliance tests and hands control to the SPDK
 * unit-test harness, which parses options (parse_arg), initializes the env
 * (init), and runs the suite.  Returns the number of failed tests.
 */
int
main(int argc, char **argv)
{
	struct spdk_env_opts	opts;
	/* opts is only captured by address here; the harness invokes init()
	 * after spdk_env_opts_init() below has populated it.
	 */
	struct spdk_ut_opts	ut_opts = {
		.optstring = "gr:",
		.opts = g_options,
		.optlen = SPDK_COUNTOF(g_options),
		.cb_arg = &opts,
		.option_cb_fn = parse_arg,
		.init_cb_fn = init,
		.usage_cb_fn = usage,
	};
	CU_pSuite		suite = NULL;
	unsigned int		num_failures;

	CU_initialize_registry();

	/* Tests run in registration order. */
	suite = CU_add_suite("nvme_compliance", NULL, NULL);
	CU_ADD_TEST(suite, admin_identify_ctrlr_verify_dptr);
	CU_ADD_TEST(suite, admin_identify_ctrlr_verify_fused);
	CU_ADD_TEST(suite, admin_identify_ns);
	CU_ADD_TEST(suite, admin_get_features_mandatory_features);
	CU_ADD_TEST(suite, admin_get_features_optional_features);
	CU_ADD_TEST(suite, admin_set_features_number_of_queues);
	CU_ADD_TEST(suite, admin_get_log_page_mandatory_logs);
	CU_ADD_TEST(suite, admin_get_log_page_with_lpo);
	CU_ADD_TEST(suite, fabric_property_get);
	CU_ADD_TEST(suite, admin_delete_io_sq_use_admin_qid);
	CU_ADD_TEST(suite, admin_delete_io_sq_delete_sq_twice);
	CU_ADD_TEST(suite, admin_delete_io_cq_use_admin_qid);
	CU_ADD_TEST(suite, admin_delete_io_cq_delete_cq_first);
	CU_ADD_TEST(suite, admin_create_io_cq_verify_iv_pc);
	CU_ADD_TEST(suite, admin_create_io_sq_verify_qsize_cqid);
	CU_ADD_TEST(suite, admin_create_io_sq_verify_pc);
	CU_ADD_TEST(suite, admin_create_io_qp_max_qps);
	CU_ADD_TEST(suite, admin_create_io_sq_shared_cq);

	spdk_env_opts_init(&opts);
	opts.name = "nvme_compliance";

	num_failures = spdk_ut_run_tests(argc, argv, &ut_opts);
	CU_cleanup_registry();
	return num_failures;
}
1526