/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "common/lib/test_env.c"

pid_t g_spdk_nvme_pid;

bool trace_flag = false;
#define SPDK_LOG_NVME trace_flag

#include "nvme/nvme_qpair.c"

SPDK_LOG_REGISTER_COMPONENT(nvme)

struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

DEFINE_STUB_V(nvme_transport_qpair_abort_reqs, (struct spdk_nvme_qpair *qpair, uint32_t dnr));
DEFINE_STUB(nvme_transport_qpair_submit_request, int,
	    (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);
DEFINE_STUB(spdk_nvme_ctrlr_free_io_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
DEFINE_STUB_V(nvme_transport_ctrlr_disconnect_qpair, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair));
DEFINE_STUB_V(nvme_ctrlr_disconnect_qpair, (struct spdk_nvme_qpair *qpair));

DEFINE_STUB_V(nvme_ctrlr_complete_queued_async_events, (struct spdk_nvme_ctrlr *ctrlr));
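
/*
 * The DEFINE_STUB()/DEFINE_STUB_V() macros generate mock implementations of
 * the named functions; for the non-void stubs, individual tests can override
 * the return value at runtime. A minimal sketch of that pattern (used below
 * in test_nvme_qpair_resubmit_request_with_transport_failed()):
 *
 *   MOCK_SET(nvme_transport_qpair_submit_request, -EINVAL);
 *   ... exercise the path that resubmits queued requests ...
 *   MOCK_CLEAR(nvme_transport_qpair_submit_request);
 */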

void
nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
{
	if (hot_remove) {
		ctrlr->is_removed = true;
	}
	ctrlr->is_failed = true;
}

static bool g_called_transport_process_completions = false;
static int32_t g_transport_process_completions_rc = 0;
int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	g_called_transport_process_completions = true;
	return g_transport_process_completions_rc;
}

static void
prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
			    struct spdk_nvme_ctrlr *ctrlr)
{
	memset(ctrlr, 0, sizeof(*ctrlr));
	ctrlr->free_io_qids = NULL;
	TAILQ_INIT(&ctrlr->active_io_qpairs);
	TAILQ_INIT(&ctrlr->active_procs);
	MOCK_CLEAR(spdk_zmalloc);
	nvme_qpair_init(qpair, 1, ctrlr, 0, 32);
}

static void
cleanup_submit_request_test(struct spdk_nvme_qpair *qpair)
{
	free(qpair->req_buf);
}

static void
expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
}

static void
expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
}

/* Submit a request on an enabled qpair and expect it to succeed. */
static void
test3(void)
{
	struct spdk_nvme_qpair		qpair = {};
	struct nvme_request		*req;
	struct spdk_nvme_ctrlr		ctrlr = {};

	qpair.state = NVME_QPAIR_ENABLED;
	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_null(&qpair, expected_success_callback, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);

	nvme_free_request(req);

	cleanup_submit_request_test(&qpair);
}

static void
test_ctrlr_failed(void)
{
	struct spdk_nvme_qpair		qpair = {};
	struct nvme_request		*req;
	struct spdk_nvme_ctrlr		ctrlr = {};
	char				payload[4096];

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_contig(&qpair, payload, sizeof(payload), expected_failure_callback,
					   NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	/* Set the controller to failed.
	 * Set the controller to resetting so that the qpair won't get re-enabled.
	 */
	ctrlr.is_failed = true;
	ctrlr.is_resetting = true;

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);

	cleanup_submit_request_test(&qpair);
}

static void
struct_packing(void)
{
	/* ctrlr is the first field in nvme_qpair after the fields
	 * that are used in the I/O path. Make sure the I/O path fields
	 * all fit into two cache lines.
	 */
	CU_ASSERT(offsetof(struct spdk_nvme_qpair, ctrlr) <= 128);
}
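
/*
 * The same invariant could also be enforced at compile time; a sketch,
 * assuming SPDK_STATIC_ASSERT from spdk/assert.h is visible in this
 * translation unit:
 *
 *   SPDK_STATIC_ASSERT(offsetof(struct spdk_nvme_qpair, ctrlr) <= 128,
 *                      "I/O path fields must fit in two cache lines");
 */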

static int g_num_cb_failed = 0;
static int g_num_cb_passed = 0;

static void
dummy_cb_fn(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	if (cpl->status.sc == SPDK_NVME_SC_SUCCESS) {
		g_num_cb_passed++;
	} else {
		g_num_cb_failed++;
	}
}

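/*
 * Walk spdk_nvme_qpair_process_completions() through the qpair state
 * machine: a failed or removed controller returns -ENXIO (aborting the
 * queued requests only in the removed case), a reset or the ENABLING
 * state returns before calling into the transport, and the remaining
 * states enable the qpair and poll the transport for completions.
 */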
static void
test_nvme_qpair_process_completions(void)
{
	struct spdk_nvme_qpair		admin_qp = {0};
	struct spdk_nvme_qpair		qpair = {0};
	struct spdk_nvme_ctrlr		ctrlr = {0};
	struct nvme_request		dummy_1 = {{0}};
	struct nvme_request		dummy_2 = {{0}};
	int				rc;

	dummy_1.cb_fn = dummy_cb_fn;
	dummy_2.cb_fn = dummy_cb_fn;
	dummy_1.qpair = &qpair;
	dummy_2.qpair = &qpair;

	TAILQ_INIT(&ctrlr.active_io_qpairs);
	TAILQ_INIT(&ctrlr.active_procs);
	nvme_qpair_init(&qpair, 1, &ctrlr, 0, 32);
	nvme_qpair_init(&admin_qp, 0, &ctrlr, 0, 32);

	ctrlr.adminq = &admin_qp;

	STAILQ_INIT(&qpair.queued_req);
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_1, stailq);
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_2, stailq);

	/* If the controller is failed, return -ENXIO */
	ctrlr.is_failed = true;
	ctrlr.is_removed = false;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);

	/* Same if the qpair is failed at the transport layer. */
	ctrlr.is_failed = false;
	ctrlr.is_removed = false;
	qpair.state = NVME_QPAIR_DISCONNECTED;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);

	/* If the controller is removed, make sure we abort the requests. */
	ctrlr.is_failed = true;
	ctrlr.is_removed = true;
	qpair.state = NVME_QPAIR_CONNECTED;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 2);

	/* If we are resetting, make sure that we don't call into the transport. */
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_1, stailq);
	dummy_1.queued = true;
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_2, stailq);
	dummy_2.queued = true;
	g_num_cb_failed = 0;
	ctrlr.is_failed = false;
	ctrlr.is_removed = false;
	ctrlr.is_resetting = true;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == false);
	/* We also need to make sure we didn't abort the requests. */
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);

	/* The case where we aren't resetting but are enabling the qpair is the same as above. */
	ctrlr.is_resetting = false;
	qpair.state = NVME_QPAIR_ENABLING;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == false);
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);

	/* For other qpair states, we want to enable the qpair. */
	qpair.state = NVME_QPAIR_CONNECTED;
	rc = spdk_nvme_qpair_process_completions(&qpair, 1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_called_transport_process_completions == true);
	/* These should have been submitted to the lower layer. */
	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);
	CU_ASSERT(nvme_qpair_get_state(&qpair) == NVME_QPAIR_ENABLED);

	g_called_transport_process_completions = false;
	g_transport_process_completions_rc = -ENXIO;

	/* Fail the controller if we get an error from the transport on the admin qpair. */
	admin_qp.state = NVME_QPAIR_ENABLED;
	rc = spdk_nvme_qpair_process_completions(&admin_qp, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == true);
	CU_ASSERT(ctrlr.is_failed == true);

	/* Don't fail the controller for regular qpairs. */
	ctrlr.is_failed = false;
	g_called_transport_process_completions = false;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == true);
	CU_ASSERT(ctrlr.is_failed == false);

	/* Make sure we don't modify the return value from the transport. */
	ctrlr.is_failed = false;
	g_called_transport_process_completions = false;
	g_transport_process_completions_rc = 23;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == 23);
	CU_ASSERT(g_called_transport_process_completions == true);
	CU_ASSERT(ctrlr.is_failed == false);

	free(qpair.req_buf);
	free(admin_qp.req_buf);
}

static void
test_nvme_completion_is_retry(void)
{
	struct spdk_nvme_cpl	cpl = {};

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_FORMAT_IN_PROGRESS;
	cpl.status.dnr = 1;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_FIELD;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_COMMAND_ID_CONFLICT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_DATA_TRANSFER_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_POWER_LOSS;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_SGL_SEG_DESCRIPTOR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	/* Note: the misspelling below matches the constant as it is defined
	 * in the SPDK NVMe spec header. */
	cpl.status.sc = SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_METADATA_SGL_LENGTH_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_CONTROLLER_MEM_BUF;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_PRP_OFFSET;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_CAPACITY_EXCEEDED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_RESERVATION_CONFLICT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	/* Unrecognized (reserved) status code value. */
	cpl.status.sc = 0x70;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_PATH;
	cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_PATH;
	cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	cpl.status.dnr = 1;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	/* Reserved status code type. */
	cpl.status.sct = 0x4;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
}
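
/*
 * A table-driven variant of a few of the checks above; a sketch that keeps
 * each expected retry decision next to the status combination producing it.
 * Only status codes already exercised in test_nvme_completion_is_retry()
 * appear here.
 */
static void
test_nvme_completion_is_retry_table(void)
{
	struct spdk_nvme_cpl cpl = {};
	const struct {
		uint8_t		sct;
		uint8_t		sc;
		uint8_t		dnr;
		bool		retry;
	} cases[] = {
		{SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_NAMESPACE_NOT_READY, 0, true},
		{SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_FORMAT_IN_PROGRESS, 1, false},
		{SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_INVALID_OPCODE, 0, false},
		{SPDK_NVME_SCT_PATH, SPDK_NVME_SC_INTERNAL_PATH_ERROR, 0, true},
		{SPDK_NVME_SCT_PATH, SPDK_NVME_SC_INTERNAL_PATH_ERROR, 1, false},
	};
	size_t i;

	for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		cpl.status.sct = cases[i].sct;
		cpl.status.sc = cases[i].sc;
		cpl.status.dnr = cases[i].dnr;
		CU_ASSERT(nvme_completion_is_retry(&cpl) == cases[i].retry);
	}
}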

#ifdef DEBUG
static void
test_get_status_string(void)
{
	const char	*status_string;
	struct spdk_nvme_status status;

	status.sct = SPDK_NVME_SCT_GENERIC;
	status.sc = SPDK_NVME_SC_SUCCESS;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "SUCCESS") == 0);

	status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	status.sc = SPDK_NVME_SC_COMPLETION_QUEUE_INVALID;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "INVALID COMPLETION QUEUE") == 0);

	status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
	status.sc = SPDK_NVME_SC_UNRECOVERED_READ_ERROR;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "UNRECOVERED READ ERROR") == 0);

	status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	status.sc = 0;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "VENDOR SPECIFIC") == 0);

	status.sct = 0x4;
	status.sc = 0;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "RESERVED") == 0);
}
#endif

static void
test_nvme_qpair_add_cmd_error_injection(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	prepare_submit_request_test(&qpair, &ctrlr);
	ctrlr.adminq = &qpair;

	/* Admin error injection at the submission path */
	MOCK_CLEAR(spdk_zmalloc);
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, NULL,
			SPDK_NVME_OPC_GET_FEATURES, true, 5000, 1,
			SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_INVALID_FIELD);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, NULL, SPDK_NVME_OPC_GET_FEATURES);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	/* I/O error injection at the completion path */
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_READ, false, 0, 1,
			SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Add the same opcode again and verify that no new entry is allocated */
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_READ, false, 0, 1,
			SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&qpair.err_cmd_head));
	CU_ASSERT(TAILQ_NEXT(TAILQ_FIRST(&qpair.err_cmd_head), link) == NULL);

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_READ);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_COMPARE, true, 0, 5,
			SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_COMPARE_FAILURE);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_COMPARE);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	cleanup_submit_request_test(&qpair);
}

static struct nvme_request *
allocate_request_tree(struct spdk_nvme_qpair *qpair)
{
	struct nvme_request	*req, *req1, *req2, *req3, *req2_1, *req2_2, *req2_3;

	/*
	 *  Build a request chain like the following:
	 *            req
	 *             |
	 *      ---------------
	 *     |       |       |
	 *    req1    req2    req3
	 *             |
	 *      ---------------
	 *     |       |       |
	 *   req2_1  req2_2  req2_3
	 */
	req = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	TAILQ_INIT(&req->children);

	req1 = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req1 != NULL);
	req->num_children++;
	TAILQ_INSERT_TAIL(&req->children, req1, child_tailq);
	req1->parent = req;

	req2 = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req2 != NULL);
	TAILQ_INIT(&req2->children);
	req->num_children++;
	TAILQ_INSERT_TAIL(&req->children, req2, child_tailq);
	req2->parent = req;

	req3 = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req3 != NULL);
	req->num_children++;
	TAILQ_INSERT_TAIL(&req->children, req3, child_tailq);
	req3->parent = req;

	req2_1 = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req2_1 != NULL);
	req2->num_children++;
	TAILQ_INSERT_TAIL(&req2->children, req2_1, child_tailq);
	req2_1->parent = req2;

	req2_2 = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req2_2 != NULL);
	req2->num_children++;
	TAILQ_INSERT_TAIL(&req2->children, req2_2, child_tailq);
	req2_2->parent = req2;

	req2_3 = nvme_allocate_request_null(qpair, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(req2_3 != NULL);
	req2->num_children++;
	TAILQ_INSERT_TAIL(&req2->children, req2_3, child_tailq);
	req2_3->parent = req2;

	return req;
}

static void
test_nvme_qpair_submit_request(void)
{
	int				rc;
	struct spdk_nvme_qpair		qpair = {};
	struct spdk_nvme_ctrlr		ctrlr = {};
	struct nvme_request		*req;

	prepare_submit_request_test(&qpair, &ctrlr);

	req = allocate_request_tree(&qpair);
	ctrlr.is_failed = true;
	rc = nvme_qpair_submit_request(&qpair, req);
	SPDK_CU_ASSERT_FATAL(rc == -ENXIO);

	req = allocate_request_tree(&qpair);
	ctrlr.is_failed = false;
	qpair.state = NVME_QPAIR_DISCONNECTING;
	rc = nvme_qpair_submit_request(&qpair, req);
	SPDK_CU_ASSERT_FATAL(rc == -ENXIO);

	cleanup_submit_request_test(&qpair);
}

static void
test_nvme_qpair_resubmit_request_with_transport_failed(void)
{
	int				rc;
	struct spdk_nvme_qpair		qpair = {};
	struct spdk_nvme_ctrlr		ctrlr = {};
	struct nvme_request		*req;

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_null(&qpair, dummy_cb_fn, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	TAILQ_INIT(&req->children);

	STAILQ_INSERT_TAIL(&qpair.queued_req, req, stailq);
	req->queued = true;

	g_transport_process_completions_rc = 1;
	qpair.state = NVME_QPAIR_ENABLED;
	g_num_cb_failed = 0;
	MOCK_SET(nvme_transport_qpair_submit_request, -EINVAL);
	rc = spdk_nvme_qpair_process_completions(&qpair, g_transport_process_completions_rc);
	MOCK_CLEAR(nvme_transport_qpair_submit_request);
	CU_ASSERT(rc == g_transport_process_completions_rc);
	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_failed == 1);

	cleanup_submit_request_test(&qpair);
}

static void
ut_spdk_nvme_cmd_cb(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(cb_arg == (void *)0xDEADBEEF);
	CU_ASSERT(cpl->sqid == 1);
	CU_ASSERT(cpl->status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(cpl->status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(cpl->status.dnr == 1);
}

static void
test_nvme_qpair_manual_complete_request(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request req = {};
	struct spdk_nvme_ctrlr ctrlr = {};

	qpair.ctrlr = &ctrlr;
	qpair.id = 1;
	req.cb_fn = ut_spdk_nvme_cmd_cb;
	req.cb_arg = (void *) 0xDEADBEEF;
	req.qpair = &qpair;
	req.num_children = 0;
	qpair.ctrlr->opts.disable_error_logging = false;
	STAILQ_INIT(&qpair.free_req);
	SPDK_CU_ASSERT_FATAL(STAILQ_EMPTY(&qpair.free_req));

	nvme_qpair_manual_complete_request(&qpair, &req, SPDK_NVME_SCT_GENERIC,
					   SPDK_NVME_SC_SUCCESS, 1, true);
	CU_ASSERT(!STAILQ_EMPTY(&qpair.free_req));
}

static void
ut_spdk_nvme_cmd_cb_empty(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{

}

static void
test_nvme_qpair_init_deinit(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request *reqs[3] = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_error_cmd *cmd = NULL;
	struct nvme_request *var_req = NULL;
	int rc, i = 0;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;

	rc = nvme_qpair_init(&qpair, 1, &ctrlr, SPDK_NVME_QPRIO_HIGH, 3);
	CU_ASSERT(rc == 0);
	CU_ASSERT(qpair.id == 1);
	CU_ASSERT(qpair.qprio == SPDK_NVME_QPRIO_HIGH);
	CU_ASSERT(qpair.in_completion_context == 0);
	CU_ASSERT(qpair.delete_after_completion_context == 0);
	CU_ASSERT(qpair.no_deletion_notification_needed == 0);
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(qpair.trtype == SPDK_NVME_TRANSPORT_PCIE);
	CU_ASSERT(qpair.req_buf != NULL);

	SPDK_CU_ASSERT_FATAL(!STAILQ_EMPTY(&qpair.free_req));
	STAILQ_FOREACH(var_req, &qpair.free_req, stailq) {
		/* Check request address alignment */
		CU_ASSERT((uint64_t)var_req % 64 == 0);
		CU_ASSERT(var_req->qpair == &qpair);
		reqs[i++] = var_req;
	}
	CU_ASSERT(i == 3);

	/* Allocate a cmd entry for deinit to free */
	cmd = spdk_zmalloc(sizeof(*cmd), 64, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
	SPDK_CU_ASSERT_FATAL(cmd != NULL);
	TAILQ_INSERT_TAIL(&qpair.err_cmd_head, cmd, link);
	for (i = 0; i < 3; i++) {
		reqs[i]->cb_fn = ut_spdk_nvme_cmd_cb_empty;
		reqs[i]->cb_arg = (void *) 0xDEADBEEF;
		reqs[i]->num_children = 0;
	}

	/* Distribute the requests among the various queue types */
	STAILQ_REMOVE(&qpair.free_req, reqs[0], nvme_request, stailq);
	STAILQ_INSERT_TAIL(&qpair.queued_req, reqs[0], stailq);
	STAILQ_REMOVE(&qpair.free_req, reqs[1], nvme_request, stailq);
	STAILQ_INSERT_TAIL(&qpair.aborting_queued_req, reqs[1], stailq);
	STAILQ_REMOVE(&qpair.free_req, reqs[2], nvme_request, stailq);
	STAILQ_INSERT_TAIL(&qpair.err_req_head, reqs[2], stailq);
	CU_ASSERT(STAILQ_EMPTY(&qpair.free_req));

	nvme_qpair_deinit(&qpair);
	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(STAILQ_EMPTY(&qpair.aborting_queued_req));
	CU_ASSERT(STAILQ_EMPTY(&qpair.err_req_head));
	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
}

int main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvme_qpair", NULL, NULL);

	CU_ADD_TEST(suite, test3);
	CU_ADD_TEST(suite, test_ctrlr_failed);
	CU_ADD_TEST(suite, struct_packing);
	CU_ADD_TEST(suite, test_nvme_qpair_process_completions);
	CU_ADD_TEST(suite, test_nvme_completion_is_retry);
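	CU_ADD_TEST(suite, test_nvme_completion_is_retry_table);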
#ifdef DEBUG
	CU_ADD_TEST(suite, test_get_status_string);
#endif
	CU_ADD_TEST(suite, test_nvme_qpair_add_cmd_error_injection);
	CU_ADD_TEST(suite, test_nvme_qpair_submit_request);
	CU_ADD_TEST(suite, test_nvme_qpair_resubmit_request_with_transport_failed);
	CU_ADD_TEST(suite, test_nvme_qpair_manual_complete_request);
	CU_ADD_TEST(suite, test_nvme_qpair_init_deinit);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}