/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "common/lib/test_env.c"

pid_t g_spdk_nvme_pid;

bool trace_flag = false;
#define SPDK_LOG_NVME trace_flag

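/*
 * Compile the code under test directly into this binary so that the tests
 * below can call its static functions (e.g. nvme_completion_is_retry()).
 */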
#include "nvme/nvme_qpair.c"

SPDK_LOG_REGISTER_COMPONENT(nvme)

struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

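/* Stub out the transport and controller entry points that nvme_qpair.c calls
 * into; these tests exercise the qpair layer in isolation.
 */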
DEFINE_STUB_V(nvme_transport_qpair_abort_reqs, (struct spdk_nvme_qpair *qpair, uint32_t dnr));
DEFINE_STUB(nvme_transport_qpair_submit_request, int,
	    (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);
DEFINE_STUB(spdk_nvme_ctrlr_free_io_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
DEFINE_STUB_V(nvme_transport_ctrlr_disconnect_qpair, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair));
DEFINE_STUB_V(nvme_ctrlr_disconnect_qpair, (struct spdk_nvme_qpair *qpair));

DEFINE_STUB_V(nvme_ctrlr_complete_queued_async_events, (struct spdk_nvme_ctrlr *ctrlr));

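/* Minimal replacement for the real nvme_ctrlr_fail(): record the failure so
 * the tests can assert on ctrlr->is_failed and ctrlr->is_removed.
 */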
void
nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
{
	if (hot_remove) {
		ctrlr->is_removed = true;
	}
	ctrlr->is_failed = true;
}

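/* Spy for the transport completion path: note that it was called and return
 * a configurable completion count or error code.
 */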
static bool g_called_transport_process_completions = false;
static int32_t g_transport_process_completions_rc = 0;
int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	g_called_transport_process_completions = true;
	return g_transport_process_completions_rc;
}

static void
prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
			    struct spdk_nvme_ctrlr *ctrlr)
{
	memset(ctrlr, 0, sizeof(*ctrlr));
	ctrlr->free_io_qids = NULL;
	TAILQ_INIT(&ctrlr->active_io_qpairs);
	TAILQ_INIT(&ctrlr->active_procs);
	MOCK_CLEAR(spdk_zmalloc);
	nvme_qpair_init(qpair, 1, ctrlr, 0, 32, false);
}

static void
cleanup_submit_request_test(struct spdk_nvme_qpair *qpair)
{
	free(qpair->req_buf);
}

static void
expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
}

static void
expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
}

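/* A null request submitted on an enabled qpair of a healthy controller
 * should succeed.
 */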
static void
test3(void)
{
	struct spdk_nvme_qpair		qpair = {};
	struct nvme_request		*req;
	struct spdk_nvme_ctrlr		ctrlr = {};

	qpair.state = NVME_QPAIR_ENABLED;
	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_null(&qpair, expected_success_callback, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);

	nvme_free_request(req);

	cleanup_submit_request_test(&qpair);
}

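/* A request submitted while the controller is failed should be rejected and
 * completed with an error status.
 */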
static void
test_ctrlr_failed(void)
{
	struct spdk_nvme_qpair		qpair = {};
	struct nvme_request		*req;
	struct spdk_nvme_ctrlr		ctrlr = {};
	char				payload[4096];

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_contig(&qpair, payload, sizeof(payload), expected_failure_callback,
					   NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	/* Set the controller to failed.
	 * Set the controller to resetting so that the qpair won't get re-enabled.
	 */
	ctrlr.is_failed = true;
	ctrlr.is_resetting = true;

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);

	cleanup_submit_request_test(&qpair);
}

static void
struct_packing(void)
{
	/* ctrlr is the first field in nvme_qpair after the fields
	 * that are used in the I/O path. Make sure the I/O path fields
	 * all fit into two 64-byte cache lines (128 bytes).
	 */
	CU_ASSERT(offsetof(struct spdk_nvme_qpair, ctrlr) <= 128);
}

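/* Completion counters updated by dummy_cb_fn() below. */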
static int g_num_cb_failed = 0;
static int g_num_cb_passed = 0;

static void
dummy_cb_fn(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	if (cpl->status.sc == SPDK_NVME_SC_SUCCESS) {
		g_num_cb_passed++;
	} else {
		g_num_cb_failed++;
	}
}

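/* Walk spdk_nvme_qpair_process_completions() through the failed, removed,
 * resetting, enabling, and enabled states, and verify how queued requests
 * and the transport return code are handled in each.
 */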
static void
test_nvme_qpair_process_completions(void)
{
	struct spdk_nvme_qpair		admin_qp = {0};
	struct spdk_nvme_qpair		qpair = {0};
	struct spdk_nvme_ctrlr		ctrlr = {0};
	struct nvme_request		dummy_1 = {{0}};
	struct nvme_request		dummy_2 = {{0}};
	int				rc;

	dummy_1.cb_fn = dummy_cb_fn;
	dummy_2.cb_fn = dummy_cb_fn;
	dummy_1.qpair = &qpair;
	dummy_2.qpair = &qpair;

	TAILQ_INIT(&ctrlr.active_io_qpairs);
	TAILQ_INIT(&ctrlr.active_procs);
	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
	nvme_qpair_init(&qpair, 1, &ctrlr, 0, 32, false);
	nvme_qpair_init(&admin_qp, 0, &ctrlr, 0, 32, false);

	ctrlr.adminq = &admin_qp;

	STAILQ_INIT(&qpair.queued_req);
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_1, stailq);
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_2, stailq);

	/* If the controller is failed, return -ENXIO */
	ctrlr.is_failed = true;
	ctrlr.is_removed = false;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);

	/* Same if the qpair is failed at the transport layer. */
	ctrlr.is_failed = false;
	ctrlr.is_removed = false;
	qpair.state = NVME_QPAIR_DISCONNECTED;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);

	/* If the controller is removed, make sure we abort the requests. */
	ctrlr.is_failed = true;
	ctrlr.is_removed = true;
	qpair.state = NVME_QPAIR_CONNECTED;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 2);

	/* If we are resetting, make sure that we don't call into the transport. */
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_1, stailq);
	dummy_1.queued = true;
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_2, stailq);
	dummy_2.queued = true;
	g_num_cb_failed = 0;
	ctrlr.is_failed = false;
	ctrlr.is_removed = false;
	ctrlr.is_resetting = true;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == false);
	/* We also need to make sure we didn't abort the requests. */
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);

	/* The case where we aren't resetting, but are enabling the qpair, is the same as above. */
	ctrlr.is_resetting = false;
	qpair.state = NVME_QPAIR_ENABLING;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == false);
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);

	/* For other qpair states, we want to enable the qpair. */
	qpair.state = NVME_QPAIR_CONNECTED;
	rc = spdk_nvme_qpair_process_completions(&qpair, 1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_called_transport_process_completions == true);
	/* These should have been submitted to the lower layer. */
	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);
	CU_ASSERT(nvme_qpair_get_state(&qpair) == NVME_QPAIR_ENABLED);

	g_called_transport_process_completions = false;
	g_transport_process_completions_rc = -ENXIO;

	/* Fail the controller if we get an error from the transport on the admin qpair. */
	admin_qp.state = NVME_QPAIR_ENABLED;
	rc = spdk_nvme_qpair_process_completions(&admin_qp, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == true);
	CU_ASSERT(ctrlr.is_failed == true);

	/* Don't fail the controller for regular qpairs. */
	ctrlr.is_failed = false;
	g_called_transport_process_completions = false;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == true);
	CU_ASSERT(ctrlr.is_failed == false);

	/* Make sure we don't modify the return value from the transport. */
	ctrlr.is_failed = false;
	g_called_transport_process_completions = false;
	g_transport_process_completions_rc = 23;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == 23);
	CU_ASSERT(g_called_transport_process_completions == true);
	CU_ASSERT(ctrlr.is_failed == false);

	free(qpair.req_buf);
	free(admin_qp.req_buf);
}

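/* Only transient statuses (e.g. NAMESPACE_NOT_READY, FORMAT_IN_PROGRESS,
 * internal path errors) should be reported as retryable, and only while the
 * do-not-retry (DNR) bit is clear.
 */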
static void
test_nvme_completion_is_retry(void)
{
	struct spdk_nvme_cpl	cpl = {};

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_FORMAT_IN_PROGRESS;
	cpl.status.dnr = 1;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_FIELD;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_COMMAND_ID_CONFLICT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_DATA_TRANSFER_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_POWER_LOSS;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_SGL_SEG_DESCRIPTOR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_METADATA_SGL_LENGTH_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_CONTROLLER_MEM_BUF;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_PRP_OFFSET;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_CAPACITY_EXCEEDED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_RESERVATION_CONFLICT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = 0x70;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_PATH;
	cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_PATH;
	cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	cpl.status.dnr = 1;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = 0x4;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
}

#ifdef DEBUG
static void
test_get_status_string(void)
{
	const char	*status_string;
	struct spdk_nvme_status status;

	status.sct = SPDK_NVME_SCT_GENERIC;
	status.sc = SPDK_NVME_SC_SUCCESS;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "SUCCESS") == 0);

	status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	status.sc = SPDK_NVME_SC_COMPLETION_QUEUE_INVALID;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "INVALID COMPLETION QUEUE") == 0);

	status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
	status.sc = SPDK_NVME_SC_UNRECOVERED_READ_ERROR;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "UNRECOVERED READ ERROR") == 0);

	status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	status.sc = 0;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "VENDOR SPECIFIC") == 0);

	status.sct = 0x4;
	status.sc = 0;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "RESERVED") == 0);
}
#endif

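/* Error injection entries are registered per opcode and per qpair (a NULL
 * qpair selects the admin qpair). Adding the same opcode twice must reuse
 * the existing entry rather than allocate a second one.
 */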
static void
test_nvme_qpair_add_cmd_error_injection(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	prepare_submit_request_test(&qpair, &ctrlr);
	ctrlr.adminq = &qpair;

	/* Admin error injection at submission path */
	MOCK_CLEAR(spdk_zmalloc);
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, NULL,
			SPDK_NVME_OPC_GET_FEATURES, true, 5000, 1,
			SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_INVALID_FIELD);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, NULL, SPDK_NVME_OPC_GET_FEATURES);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	/* IO error injection at completion path */
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_READ, false, 0, 1,
			SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Add the same opc again and verify that no new entry is allocated. */
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_READ, false, 0, 1,
			SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&qpair.err_cmd_head));
	CU_ASSERT(TAILQ_NEXT(TAILQ_FIRST(&qpair.err_cmd_head), link) == NULL);

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_READ);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_COMPARE, true, 0, 5,
			SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_COMPARE_FAILURE);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_COMPARE);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	cleanup_submit_request_test(&qpair);
}

static struct nvme_request *
allocate_request_tree(struct spdk_nvme_qpair *qpair)
{
	struct nvme_request	*req, *req1, *req2, *req3, *req2_1, *req2_2, *req2_3;

	/*
	 *  Build a request chain like the following:
	 *            req
	 *             |
	 *      ---------------
	 *     |       |       |
	 *    req1    req2    req3
	 *             |
	 *      ---------------
	 *     |       |       |
	 *   req2_1  req2_2  req2_3
	 */
	req = nvme_allocate_request_null(qpair, NULL, NULL);
	CU_ASSERT(req != NULL);
	TAILQ_INIT(&req->children);

	req1 = nvme_allocate_request_null(qpair, NULL, NULL);
	CU_ASSERT(req1 != NULL);
	req->num_children++;
	TAILQ_INSERT_TAIL(&req->children, req1, child_tailq);
	req1->parent = req;

	req2 = nvme_allocate_request_null(qpair, NULL, NULL);
	CU_ASSERT(req2 != NULL);
	TAILQ_INIT(&req2->children);
	req->num_children++;
	TAILQ_INSERT_TAIL(&req->children, req2, child_tailq);
	req2->parent = req;

	req3 = nvme_allocate_request_null(qpair, NULL, NULL);
	CU_ASSERT(req3 != NULL);
	req->num_children++;
	TAILQ_INSERT_TAIL(&req->children, req3, child_tailq);
	req3->parent = req;

	req2_1 = nvme_allocate_request_null(qpair, NULL, NULL);
	CU_ASSERT(req2_1 != NULL);
	req2->num_children++;
	TAILQ_INSERT_TAIL(&req2->children, req2_1, child_tailq);
	req2_1->parent = req2;

	req2_2 = nvme_allocate_request_null(qpair, NULL, NULL);
	CU_ASSERT(req2_2 != NULL);
	req2->num_children++;
	TAILQ_INSERT_TAIL(&req2->children, req2_2, child_tailq);
	req2_2->parent = req2;

	req2_3 = nvme_allocate_request_null(qpair, NULL, NULL);
	CU_ASSERT(req2_3 != NULL);
	req2->num_children++;
	TAILQ_INSERT_TAIL(&req2->children, req2_3, child_tailq);
	req2_3->parent = req2;

	return req;
}

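/* A request submitted to a failed controller or to a disconnecting qpair
 * must be rejected with -ENXIO.
 */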
static void
test_nvme_qpair_submit_request(void)
{
	int				rc;
	struct spdk_nvme_qpair		qpair = {};
	struct spdk_nvme_ctrlr		ctrlr = {};
	struct nvme_request		*req;

	prepare_submit_request_test(&qpair, &ctrlr);

	req = allocate_request_tree(&qpair);
	ctrlr.is_failed = true;
	rc = nvme_qpair_submit_request(&qpair, req);
	SPDK_CU_ASSERT_FATAL(rc == -ENXIO);

	req = allocate_request_tree(&qpair);
	ctrlr.is_failed = false;
	qpair.state = NVME_QPAIR_DISCONNECTING;
	rc = nvme_qpair_submit_request(&qpair, req);
	SPDK_CU_ASSERT_FATAL(rc == -ENXIO);

	cleanup_submit_request_test(&qpair);
}

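/* If the transport rejects a queued request when it is resubmitted from
 * process_completions(), the request must be completed with an error rather
 * than left on the queue.
 */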
static void
test_nvme_qpair_resubmit_request_with_transport_failed(void)
{
	int				rc;
	struct spdk_nvme_qpair		qpair = {};
	struct spdk_nvme_ctrlr		ctrlr = {};
	struct nvme_request		*req;

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_null(&qpair, dummy_cb_fn, NULL);
	CU_ASSERT(req != NULL);
	TAILQ_INIT(&req->children);

	STAILQ_INSERT_TAIL(&qpair.queued_req, req, stailq);
	req->queued = true;

	g_transport_process_completions_rc = 1;
	qpair.state = NVME_QPAIR_ENABLED;
	g_num_cb_failed = 0;
	MOCK_SET(nvme_transport_qpair_submit_request, -EINVAL);
	rc = spdk_nvme_qpair_process_completions(&qpair, g_transport_process_completions_rc);
	MOCK_CLEAR(nvme_transport_qpair_submit_request);
	CU_ASSERT(rc == g_transport_process_completions_rc);
	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_failed == 1);

	cleanup_submit_request_test(&qpair);
}

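/* Checks the completion built by nvme_qpair_manual_complete_request() below:
 * sqid, status type and code, and the DNR bit.
 */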
static void
ut_spdk_nvme_cmd_cb(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(cb_arg == (void *)0xDEADBEEF);
	CU_ASSERT(cpl->sqid == 1);
	CU_ASSERT(cpl->status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(cpl->status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(cpl->status.dnr == 1);
}

static void
test_nvme_qpair_manual_complete_request(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request req = {};
	struct spdk_nvme_ctrlr ctrlr = {};

	qpair.ctrlr = &ctrlr;
	qpair.id = 1;
	req.cb_fn = ut_spdk_nvme_cmd_cb;
	req.cb_arg = (void *) 0xDEADBEEF;
	req.qpair = &qpair;
	req.num_children = 0;
	qpair.ctrlr->opts.disable_error_logging = false;
	STAILQ_INIT(&qpair.free_req);
	SPDK_CU_ASSERT_FATAL(STAILQ_EMPTY(&qpair.free_req));

	nvme_qpair_manual_complete_request(&qpair, &req, SPDK_NVME_SCT_GENERIC,
					   SPDK_NVME_SC_SUCCESS, 1, true);
	CU_ASSERT(!STAILQ_EMPTY(&qpair.free_req));
}

static void
ut_spdk_nvme_cmd_cb_empty(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
}

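/* nvme_qpair_init() should fill in the qpair fields and carve the requested
 * number of 64-byte-aligned request objects out of req_buf;
 * nvme_qpair_deinit() should drain every request queue and free any error
 * injection entries.
 */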
static void
test_nvme_qpair_init_deinit(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request *reqs[3] = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_error_cmd *cmd = NULL;
	struct nvme_request *var_req = NULL;
	int rc, i = 0;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;

	rc = nvme_qpair_init(&qpair, 1, &ctrlr, SPDK_NVME_QPRIO_HIGH, 3, false);
	CU_ASSERT(rc == 0);
	CU_ASSERT(qpair.id == 1);
	CU_ASSERT(qpair.qprio == SPDK_NVME_QPRIO_HIGH);
	CU_ASSERT(qpair.in_completion_context == 0);
	CU_ASSERT(qpair.delete_after_completion_context == 0);
	CU_ASSERT(qpair.no_deletion_notification_needed == 0);
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(qpair.trtype == SPDK_NVME_TRANSPORT_PCIE);
	CU_ASSERT(qpair.req_buf != NULL);

	SPDK_CU_ASSERT_FATAL(!STAILQ_EMPTY(&qpair.free_req));
	STAILQ_FOREACH(var_req, &qpair.free_req, stailq) {
		/* Check that each request is 64-byte aligned. */
		CU_ASSERT((uint64_t)var_req % 64 == 0);
		CU_ASSERT(var_req->qpair == &qpair);
		reqs[i++] = var_req;
	}
	CU_ASSERT(i == 3);

	/* Allocate an error injection entry so that deinit has one to free. */
	cmd = spdk_zmalloc(sizeof(*cmd), 64, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
	SPDK_CU_ASSERT_FATAL(cmd != NULL);
	TAILQ_INSERT_TAIL(&qpair.err_cmd_head, cmd, link);
	for (int i = 0; i < 3; i++) {
		reqs[i]->cb_fn = ut_spdk_nvme_cmd_cb_empty;
		reqs[i]->cb_arg = (void *) 0xDEADBEEF;
		reqs[i]->num_children = 0;
	}

	/* Distribute the requests across the different queue types. */
	STAILQ_REMOVE(&qpair.free_req, reqs[0], nvme_request, stailq);
	STAILQ_INSERT_TAIL(&qpair.queued_req, reqs[0], stailq);
	STAILQ_REMOVE(&qpair.free_req, reqs[1], nvme_request, stailq);
	STAILQ_INSERT_TAIL(&qpair.aborting_queued_req, reqs[1], stailq);
	STAILQ_REMOVE(&qpair.free_req, reqs[2], nvme_request, stailq);
	STAILQ_INSERT_TAIL(&qpair.err_req_head, reqs[2], stailq);
	CU_ASSERT(STAILQ_EMPTY(&qpair.free_req));

	nvme_qpair_deinit(&qpair);
	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(STAILQ_EMPTY(&qpair.aborting_queued_req));
	CU_ASSERT(STAILQ_EMPTY(&qpair.err_req_head));
	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
}

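/* Exercise the debug helpers that format SGL descriptors for tracing:
 * keyed and unkeyed variants, plus the combined nvme_get_sgl() output.
 */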
static void
test_nvme_get_sgl_print_info(void)
{
	char buf[NVME_CMD_DPTR_STR_SIZE] = {};
	struct spdk_nvme_cmd cmd = {};

	cmd.dptr.sgl1.keyed.length = 0x1000;
	cmd.dptr.sgl1.keyed.key = 0xababccdd;

	nvme_get_sgl_keyed(buf, NVME_CMD_DPTR_STR_SIZE, &cmd);
	CU_ASSERT(!strncmp(buf, " len:0x1000 key:0xababccdd", NVME_CMD_DPTR_STR_SIZE));

	memset(&cmd.dptr.sgl1, 0, sizeof(cmd.dptr.sgl1));
	cmd.dptr.sgl1.unkeyed.length = 0x1000;

	nvme_get_sgl_unkeyed(buf, NVME_CMD_DPTR_STR_SIZE, &cmd);
	CU_ASSERT(!strncmp(buf, " len:0x1000", NVME_CMD_DPTR_STR_SIZE));

	/* A keyed data block descriptor is printed with both length and key. */
	memset(&cmd.dptr.sgl1, 0, sizeof(cmd.dptr.sgl1));
	cmd.dptr.sgl1.generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	cmd.dptr.sgl1.generic.subtype = 0;
	cmd.dptr.sgl1.address = 0xdeadbeef;
	cmd.dptr.sgl1.keyed.length = 0x1000;
	cmd.dptr.sgl1.keyed.key = 0xababccdd;

	nvme_get_sgl(buf, NVME_CMD_DPTR_STR_SIZE, &cmd);
	CU_ASSERT(!strncmp(buf, "SGL KEYED DATA BLOCK ADDRESS 0xdeadbeef len:0x1000 key:0xababccdd",
			   NVME_CMD_DPTR_STR_SIZE));

	/* An unkeyed data block descriptor is printed with its length only. */
	memset(&cmd.dptr.sgl1, 0, sizeof(cmd.dptr.sgl1));
	cmd.dptr.sgl1.generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	cmd.dptr.sgl1.generic.subtype = 0;
	cmd.dptr.sgl1.address = 0xdeadbeef;
	cmd.dptr.sgl1.unkeyed.length = 0x1000;

	nvme_get_sgl(buf, NVME_CMD_DPTR_STR_SIZE, &cmd);
	CU_ASSERT(!strncmp(buf, "SGL DATA BLOCK ADDRESS 0xdeadbeef len:0x1000",
			   NVME_CMD_DPTR_STR_SIZE));
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvme_qpair", NULL, NULL);

	CU_ADD_TEST(suite, test3);
	CU_ADD_TEST(suite, test_ctrlr_failed);
	CU_ADD_TEST(suite, struct_packing);
	CU_ADD_TEST(suite, test_nvme_qpair_process_completions);
	CU_ADD_TEST(suite, test_nvme_completion_is_retry);
#ifdef DEBUG
	CU_ADD_TEST(suite, test_get_status_string);
#endif
	CU_ADD_TEST(suite, test_nvme_qpair_add_cmd_error_injection);
	CU_ADD_TEST(suite, test_nvme_qpair_submit_request);
	CU_ADD_TEST(suite, test_nvme_qpair_resubmit_request_with_transport_failed);
	CU_ADD_TEST(suite, test_nvme_qpair_manual_complete_request);
	CU_ADD_TEST(suite, test_nvme_qpair_init_deinit);
	CU_ADD_TEST(suite, test_nvme_get_sgl_print_info);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}