/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "common/lib/test_env.c"

pid_t g_spdk_nvme_pid;

bool trace_flag = false;
#define SPDK_LOG_NVME trace_flag

#include "nvme/nvme_qpair.c"

SPDK_LOG_REGISTER_COMPONENT(nvme)

struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

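/* Stub out the transport and controller entry points that nvme_qpair.c calls
 * into; the stubs that return int simply report success (0).
 */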
DEFINE_STUB_V(nvme_transport_qpair_abort_reqs, (struct spdk_nvme_qpair *qpair, uint32_t dnr));
DEFINE_STUB(nvme_transport_qpair_submit_request, int,
	    (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);
DEFINE_STUB(spdk_nvme_ctrlr_free_io_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
DEFINE_STUB_V(nvme_transport_ctrlr_disconnect_qpair, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair));
DEFINE_STUB_V(nvme_ctrlr_disconnect_qpair, (struct spdk_nvme_qpair *qpair));

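/* Minimal replacement for nvme_ctrlr_fail(): just record the failure (and,
 * for hot removal, the removed state) on the controller.
 */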
void
nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
{
	if (hot_remove) {
		ctrlr->is_removed = true;
	}
	ctrlr->is_failed = true;
}

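/* Mock of the transport completion path: records that it was called and
 * returns a configurable value (a completion count, or a negative errno).
 */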
static bool g_called_transport_process_completions = false;
static int32_t g_transport_process_completions_rc = 0;
int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	g_called_transport_process_completions = true;
	return g_transport_process_completions_rc;
}

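/* Helpers to set up a qpair on a zeroed-out controller, and to free the
 * qpair's request buffer afterwards.
 */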
static void
prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
			    struct spdk_nvme_ctrlr *ctrlr)
{
	memset(ctrlr, 0, sizeof(*ctrlr));
	ctrlr->free_io_qids = NULL;
	TAILQ_INIT(&ctrlr->active_io_qpairs);
	TAILQ_INIT(&ctrlr->active_procs);
	MOCK_CLEAR(spdk_zmalloc);
	nvme_qpair_init(qpair, 1, ctrlr, 0, 32);
}

static void
cleanup_submit_request_test(struct spdk_nvme_qpair *qpair)
{
	free(qpair->req_buf);
}

static void
expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
}

static void
expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
}

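/* Submit a null (no payload) request on an enabled qpair and expect it to be
 * handed to the transport successfully.
 */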
static void
test3(void)
{
	struct spdk_nvme_qpair		qpair = {};
	struct nvme_request		*req;
	struct spdk_nvme_ctrlr		ctrlr = {};

	qpair.state = NVME_QPAIR_ENABLED;
	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_null(&qpair, expected_success_callback, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);

	nvme_free_request(req);

	cleanup_submit_request_test(&qpair);
}

static void
test_ctrlr_failed(void)
{
	struct spdk_nvme_qpair		qpair = {};
	struct nvme_request		*req;
	struct spdk_nvme_ctrlr		ctrlr = {};
	char				payload[4096];

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_contig(&qpair, payload, sizeof(payload), expected_failure_callback,
					   NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	/* Set the controller to failed.
	 * Set the controller to resetting so that the qpair won't get re-enabled.
	 */
	ctrlr.is_failed = true;
	ctrlr.is_resetting = true;

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);

	cleanup_submit_request_test(&qpair);
}

static void
struct_packing(void)
{
	/* ctrlr is the first field in nvme_qpair after the fields
	 * that are used in the I/O path. Make sure the I/O path fields
	 * all fit into two cache lines (2 x 64 = 128 bytes).
	 */
	CU_ASSERT(offsetof(struct spdk_nvme_qpair, ctrlr) <= 128);
}

static int g_num_cb_failed = 0;
static int g_num_cb_passed = 0;

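/* Completion callback shared by the tests below: tally successful vs. failed
 * completions so each case can assert on what was completed.
 */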
static void
dummy_cb_fn(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	if (cpl->status.sc == SPDK_NVME_SC_SUCCESS) {
		g_num_cb_passed++;
	} else {
		g_num_cb_failed++;
	}
}

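/* Step spdk_nvme_qpair_process_completions() through the failed, removed,
 * resetting, enabling, and enabled states, and verify how queued requests
 * and transport return codes are handled in each.
 */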
static void
test_nvme_qpair_process_completions(void)
{
	struct spdk_nvme_qpair		admin_qp = {0};
	struct spdk_nvme_qpair		qpair = {0};
	struct spdk_nvme_ctrlr		ctrlr = {0};
	struct nvme_request		dummy_1 = {{0}};
	struct nvme_request		dummy_2 = {{0}};
	int				rc;

	dummy_1.cb_fn = dummy_cb_fn;
	dummy_2.cb_fn = dummy_cb_fn;
	dummy_1.qpair = &qpair;
	dummy_2.qpair = &qpair;

	TAILQ_INIT(&ctrlr.active_io_qpairs);
	TAILQ_INIT(&ctrlr.active_procs);
	nvme_qpair_init(&qpair, 1, &ctrlr, 0, 32);
	nvme_qpair_init(&admin_qp, 0, &ctrlr, 0, 32);

	ctrlr.adminq = &admin_qp;

	STAILQ_INIT(&qpair.queued_req);
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_1, stailq);
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_2, stailq);

	/* If the controller is failed, return -ENXIO */
	ctrlr.is_failed = true;
	ctrlr.is_removed = false;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);

	/* Same if the qpair is failed at the transport layer. */
	ctrlr.is_failed = false;
	ctrlr.is_removed = false;
	qpair.state = NVME_QPAIR_DISCONNECTED;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);

	/* If the controller is removed, make sure we abort the requests. */
	ctrlr.is_failed = true;
	ctrlr.is_removed = true;
	qpair.state = NVME_QPAIR_CONNECTED;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 2);

	/* If we are resetting, make sure that we don't call into the transport. */
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_1, stailq);
	dummy_1.queued = true;
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_2, stailq);
	dummy_2.queued = true;
	g_num_cb_failed = 0;
	ctrlr.is_failed = false;
	ctrlr.is_removed = false;
	ctrlr.is_resetting = true;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == false);
	/* We also need to make sure we didn't abort the requests. */
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);

	/* The case where we aren't resetting, but are enabling the qpair, is the same as above. */
	ctrlr.is_resetting = false;
	qpair.state = NVME_QPAIR_ENABLING;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == false);
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);

	/* For other qpair states, we want to enable the qpair. */
	qpair.state = NVME_QPAIR_CONNECTED;
	rc = spdk_nvme_qpair_process_completions(&qpair, 1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_called_transport_process_completions == true);
	/* These should have been submitted to the lower layer. */
	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);
	CU_ASSERT(nvme_qpair_get_state(&qpair) == NVME_QPAIR_ENABLED);

	g_called_transport_process_completions = false;
	g_transport_process_completions_rc = -ENXIO;

	/* Fail the controller if we get an error from the transport on the admin qpair. */
	admin_qp.state = NVME_QPAIR_ENABLED;
	rc = spdk_nvme_qpair_process_completions(&admin_qp, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == true);
	CU_ASSERT(ctrlr.is_failed == true);

	/* Don't fail the controller for regular qpairs. */
	ctrlr.is_failed = false;
	g_called_transport_process_completions = false;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == true);
	CU_ASSERT(ctrlr.is_failed == false);

	/* Make sure we don't modify the return value from the transport. */
	ctrlr.is_failed = false;
	g_called_transport_process_completions = false;
	g_transport_process_completions_rc = 23;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == 23);
	CU_ASSERT(g_called_transport_process_completions == true);
	CU_ASSERT(ctrlr.is_failed == false);

	free(qpair.req_buf);
	free(admin_qp.req_buf);
}

static void
test_nvme_completion_is_retry(void)
{
	struct spdk_nvme_cpl	cpl = {};

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_FORMAT_IN_PROGRESS;
	cpl.status.dnr = 1;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_FIELD;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_COMMAND_ID_CONFLICT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_DATA_TRANSFER_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_POWER_LOSS;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_SGL_SEG_DESCRIPTOR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	/* Note: the misspelled identifier below matches the definition in spdk/nvme_spec.h. */
	cpl.status.sc = SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_METADATA_SGL_LENGTH_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_CONTROLLER_MEM_BUF;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_PRP_OFFSET;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_CAPACITY_EXCEEDED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_RESERVATION_CONFLICT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	/* An unrecognized generic status code is not retried. */
	cpl.status.sc = 0x70;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_PATH;
	cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_PATH;
	cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	cpl.status.dnr = 1;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	/* An unrecognized (reserved) status code type is not retried. */
	cpl.status.sct = 0x4;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
}

#ifdef DEBUG
static void
test_get_status_string(void)
{
	const char	*status_string;
	struct spdk_nvme_status status;

	status.sct = SPDK_NVME_SCT_GENERIC;
	status.sc = SPDK_NVME_SC_SUCCESS;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "SUCCESS") == 0);

	status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	status.sc = SPDK_NVME_SC_COMPLETION_QUEUE_INVALID;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "INVALID COMPLETION QUEUE") == 0);

	status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
	status.sc = SPDK_NVME_SC_UNRECOVERED_READ_ERROR;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "UNRECOVERED READ ERROR") == 0);

	status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	status.sc = 0;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "VENDOR SPECIFIC") == 0);

	status.sct = 0x4;
	status.sc = 0;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "RESERVED") == 0);
}
#endif

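/* Exercise adding and removing command error injection entries, both on the
 * admin queue (qpair == NULL) and on an explicit I/O qpair, and verify that
 * re-adding the same opcode does not allocate a duplicate entry.
 */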
static void
test_nvme_qpair_add_cmd_error_injection(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	prepare_submit_request_test(&qpair, &ctrlr);
	ctrlr.adminq = &qpair;

	/* Admin error injection at submission path */
	MOCK_CLEAR(spdk_zmalloc);
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, NULL,
			SPDK_NVME_OPC_GET_FEATURES, true, 5000, 1,
			SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_INVALID_FIELD);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, NULL, SPDK_NVME_OPC_GET_FEATURES);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	/* IO error injection at completion path */
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_READ, false, 0, 1,
			SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Add the same opc again, and check that a duplicate entry is not allocated. */
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_READ, false, 0, 1,
			SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&qpair.err_cmd_head));
	CU_ASSERT(TAILQ_NEXT(TAILQ_FIRST(&qpair.err_cmd_head), link) == NULL);

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_READ);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_COMPARE, true, 0, 5,
			SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_COMPARE_FAILURE);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_COMPARE);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	cleanup_submit_request_test(&qpair);
}

static struct nvme_request *
allocate_request_tree(struct spdk_nvme_qpair *qpair)
{
	struct nvme_request	*req, *req1, *req2, *req3, *req2_1, *req2_2, *req2_3;

	/*
	 *  Build a request chain like the following:
	 *            req
	 *             |
	 *      ---------------
	 *     |       |       |
	 *    req1    req2    req3
	 *             |
	 *      ---------------
	 *     |       |       |
	 *   req2_1  req2_2  req2_3
	 */
	req = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	TAILQ_INIT(&req->children);

	req1 = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req1 != NULL);
	req->num_children++;
	TAILQ_INSERT_TAIL(&req->children, req1, child_tailq);
	req1->parent = req;

	req2 = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req2 != NULL);
	TAILQ_INIT(&req2->children);
	req->num_children++;
	TAILQ_INSERT_TAIL(&req->children, req2, child_tailq);
	req2->parent = req;

	req3 = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req3 != NULL);
	req->num_children++;
	TAILQ_INSERT_TAIL(&req->children, req3, child_tailq);
	req3->parent = req;

	req2_1 = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req2_1 != NULL);
	req2->num_children++;
	TAILQ_INSERT_TAIL(&req2->children, req2_1, child_tailq);
	req2_1->parent = req2;

	req2_2 = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req2_2 != NULL);
	req2->num_children++;
	TAILQ_INSERT_TAIL(&req2->children, req2_2, child_tailq);
	req2_2->parent = req2;

	req2_3 = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req2_3 != NULL);
	req2->num_children++;
	TAILQ_INSERT_TAIL(&req2->children, req2_3, child_tailq);
	req2_3->parent = req2;

	return req;
}

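/* Submitting a request tree must fail with -ENXIO both when the controller
 * is failed and when the qpair is disconnecting.
 */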
static void
test_nvme_qpair_submit_request(void)
{
	int				rc;
	struct spdk_nvme_qpair		qpair = {};
	struct spdk_nvme_ctrlr		ctrlr = {};
	struct nvme_request		*req;

	prepare_submit_request_test(&qpair, &ctrlr);

	req = allocate_request_tree(&qpair);
	ctrlr.is_failed = true;
	rc = nvme_qpair_submit_request(&qpair, req);
	SPDK_CU_ASSERT_FATAL(rc == -ENXIO);

	req = allocate_request_tree(&qpair);
	ctrlr.is_failed = false;
	qpair.state = NVME_QPAIR_DISCONNECTING;
	rc = nvme_qpair_submit_request(&qpair, req);
	SPDK_CU_ASSERT_FATAL(rc == -ENXIO);

	cleanup_submit_request_test(&qpair);
}

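/* A queued request whose resubmission fails in the transport should be
 * completed with an error and removed from the queue, not left pending.
 */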
static void
test_nvme_qpair_resubmit_request_with_transport_failed(void)
{
	int				rc;
	struct spdk_nvme_qpair		qpair = {};
	struct spdk_nvme_ctrlr		ctrlr = {};
	struct nvme_request		*req;

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_null(&qpair, dummy_cb_fn, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	TAILQ_INIT(&req->children);

	STAILQ_INSERT_TAIL(&qpair.queued_req, req, stailq);
	req->queued = true;

	g_transport_process_completions_rc = 1;
	qpair.state = NVME_QPAIR_ENABLED;
	g_num_cb_failed = 0;
	MOCK_SET(nvme_transport_qpair_submit_request, -EINVAL);
	rc = spdk_nvme_qpair_process_completions(&qpair, g_transport_process_completions_rc);
	MOCK_CLEAR(nvme_transport_qpair_submit_request);
	CU_ASSERT(rc == g_transport_process_completions_rc);
	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_failed == 1);

	cleanup_submit_request_test(&qpair);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvme_qpair", NULL, NULL);

	CU_ADD_TEST(suite, test3);
	CU_ADD_TEST(suite, test_ctrlr_failed);
	CU_ADD_TEST(suite, struct_packing);
	CU_ADD_TEST(suite, test_nvme_qpair_process_completions);
	CU_ADD_TEST(suite, test_nvme_completion_is_retry);
#ifdef DEBUG
	CU_ADD_TEST(suite, test_get_status_string);
#endif
	CU_ADD_TEST(suite, test_nvme_qpair_add_cmd_error_injection);
	CU_ADD_TEST(suite, test_nvme_qpair_submit_request);
	CU_ADD_TEST(suite, test_nvme_qpair_resubmit_request_with_transport_failed);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}