/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2015 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"

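/* Pull in the mocked env layer (spdk_zmalloc and friends) that these tests
 * control via the MOCK_* helpers below.
 */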
#include "common/lib/test_env.c"

pid_t g_spdk_nvme_pid;

bool trace_flag = false;
#define SPDK_LOG_NVME trace_flag

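/* Include the code under test directly so that its static functions
 * (e.g. nvme_completion_is_retry(), nvme_get_sgl()) are visible to the tests.
 */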
#include "nvme/nvme_qpair.c"

SPDK_LOG_REGISTER_COMPONENT(nvme)

struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

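/* Stub out the transport and controller entry points that nvme_qpair.c calls into. */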
DEFINE_STUB_V(nvme_transport_qpair_abort_reqs, (struct spdk_nvme_qpair *qpair));
DEFINE_STUB(nvme_transport_qpair_submit_request, int,
	    (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);
DEFINE_STUB(spdk_nvme_ctrlr_free_io_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
DEFINE_STUB_V(nvme_transport_ctrlr_disconnect_qpair, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair));
DEFINE_STUB_V(nvme_ctrlr_disconnect_qpair, (struct spdk_nvme_qpair *qpair));

DEFINE_STUB_V(nvme_ctrlr_abort_queued_aborts, (struct spdk_nvme_ctrlr *ctrlr));
DEFINE_STUB(nvme_ctrlr_reinitialize_io_qpair, int,
	    (struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair), 0);

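/* Hand-rolled stub: record the failure mode so tests can assert on the ctrlr flags. */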
void
nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
{
	if (hot_remove) {
		ctrlr->is_removed = true;
	}
	ctrlr->is_failed = true;
}

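/* Instrumented stub: remembers that it was called and returns a test-controlled rc. */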
static bool g_called_transport_process_completions = false;
static int32_t g_transport_process_completions_rc = 0;
int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	g_called_transport_process_completions = true;
	return g_transport_process_completions_rc;
}

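/* Common setup: zero the ctrlr and initialize a 32-entry qpair against it. */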
static void
prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
			    struct spdk_nvme_ctrlr *ctrlr)
{
	memset(ctrlr, 0, sizeof(*ctrlr));
	ctrlr->free_io_qids = NULL;
	TAILQ_INIT(&ctrlr->active_io_qpairs);
	TAILQ_INIT(&ctrlr->active_procs);
	MOCK_CLEAR(spdk_zmalloc);
	nvme_qpair_init(qpair, 1, ctrlr, 0, 32, false);
}

static void
cleanup_submit_request_test(struct spdk_nvme_qpair *qpair)
{
	free(qpair->req_buf);
}

static void
expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
}

static void
expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
}

static void
test3(void)
{
	struct spdk_nvme_qpair		qpair = {};
	struct nvme_request		*req;
	struct spdk_nvme_ctrlr		ctrlr = {};

	qpair.state = NVME_QPAIR_ENABLED;
	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_null(&qpair, expected_success_callback, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);

	nvme_free_request(req);

	cleanup_submit_request_test(&qpair);
}

static void
test_ctrlr_failed(void)
{
	struct spdk_nvme_qpair		qpair = {};
	struct nvme_request		*req;
	struct spdk_nvme_ctrlr		ctrlr = {};
	char				payload[4096];

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_contig(&qpair, payload, sizeof(payload), expected_failure_callback,
					   NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	/* Mark the controller as failed, and also as resetting so that the
	 * qpair won't get re-enabled.
	 */
	ctrlr.is_failed = true;
	ctrlr.is_resetting = true;

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);

	cleanup_submit_request_test(&qpair);
}

static void
struct_packing(void)
{
	/* ctrlr is the first field in nvme_qpair after the fields
	 * that are used in the I/O path. Make sure the I/O path fields
	 * all fit into two cache lines.
	 */
	CU_ASSERT(offsetof(struct spdk_nvme_qpair, ctrlr) <= 128);
}

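/* Completion counters updated by dummy_cb_fn() so tests can count
 * passed vs. failed completions.
 */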
static int g_num_cb_failed = 0;
static int g_num_cb_passed = 0;

static void
dummy_cb_fn(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	if (cpl->status.sc == SPDK_NVME_SC_SUCCESS) {
		g_num_cb_passed++;
	} else {
		g_num_cb_failed++;
	}
}

static void
test_nvme_qpair_process_completions(void)
{
	struct spdk_nvme_qpair		admin_qp = {0};
	struct spdk_nvme_qpair		qpair = {0};
	struct spdk_nvme_ctrlr		ctrlr = {{0}};
	struct nvme_request		dummy_1 = {{0}};
	struct nvme_request		dummy_2 = {{0}};
	int				rc;

	dummy_1.cb_fn = dummy_cb_fn;
	dummy_2.cb_fn = dummy_cb_fn;
	dummy_1.qpair = &qpair;
	dummy_2.qpair = &qpair;

	TAILQ_INIT(&ctrlr.active_io_qpairs);
	TAILQ_INIT(&ctrlr.active_procs);
	CU_ASSERT(pthread_mutex_init(&ctrlr.ctrlr_lock, NULL) == 0);
	nvme_qpair_init(&qpair, 1, &ctrlr, 0, 32, false);
	nvme_qpair_init(&admin_qp, 0, &ctrlr, 0, 32, false);

	ctrlr.adminq = &admin_qp;

	STAILQ_INIT(&qpair.queued_req);
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_1, stailq);
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_2, stailq);
	qpair.num_outstanding_reqs = 2;

	/* If the controller is failed, return -ENXIO */
	ctrlr.is_failed = true;
	ctrlr.is_removed = false;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);
	CU_ASSERT(qpair.num_outstanding_reqs == 2);

	/* Same if the qpair is failed at the transport layer. */
	ctrlr.is_failed = false;
	ctrlr.is_removed = false;
	qpair.state = NVME_QPAIR_DISCONNECTED;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);
	CU_ASSERT(qpair.num_outstanding_reqs == 2);

	/* If the controller is removed, make sure we abort the requests. */
	ctrlr.is_failed = true;
	ctrlr.is_removed = true;
	qpair.state = NVME_QPAIR_CONNECTED;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 2);
	CU_ASSERT(qpair.num_outstanding_reqs == 0);

	/* If we are resetting, make sure that we don't call into the transport. */
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_1, stailq);
	dummy_1.queued = true;
	STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_2, stailq);
	dummy_2.queued = true;
	g_num_cb_failed = 0;
	ctrlr.is_failed = false;
	ctrlr.is_removed = false;
	ctrlr.is_resetting = true;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == false);
	/* We also need to make sure we didn't abort the requests. */
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);

	/* The case where we aren't resetting but are enabling the qpair is the same as above. */
	ctrlr.is_resetting = false;
	qpair.state = NVME_QPAIR_ENABLING;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == false);
	CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);

	/* For other qpair states, we want to enable the qpair. */
	qpair.state = NVME_QPAIR_CONNECTED;
	rc = spdk_nvme_qpair_process_completions(&qpair, 1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_called_transport_process_completions == true);
	/* These should have been submitted to the lower layer. */
	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_passed == 0);
	CU_ASSERT(g_num_cb_failed == 0);
	CU_ASSERT(nvme_qpair_get_state(&qpair) == NVME_QPAIR_ENABLED);

	g_called_transport_process_completions = false;
	g_transport_process_completions_rc = -ENXIO;

	/* Fail the controller if we get an error from the transport on the admin qpair. */
	admin_qp.state = NVME_QPAIR_ENABLED;
	rc = spdk_nvme_qpair_process_completions(&admin_qp, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == true);
	CU_ASSERT(ctrlr.is_failed == true);

	/* Don't fail the controller for regular qpairs. */
	ctrlr.is_failed = false;
	g_called_transport_process_completions = false;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(g_called_transport_process_completions == true);
	CU_ASSERT(ctrlr.is_failed == false);

	/* Make sure we don't modify the return value from the transport. */
	ctrlr.is_failed = false;
	g_called_transport_process_completions = false;
	g_transport_process_completions_rc = 23;
	rc = spdk_nvme_qpair_process_completions(&qpair, 0);
	CU_ASSERT(rc == 23);
	CU_ASSERT(g_called_transport_process_completions == true);
	CU_ASSERT(ctrlr.is_failed == false);

	free(qpair.req_buf);
	free(admin_qp.req_buf);
}

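/* Walk the completion status space: only a few transient status codes are
 * retried, and DNR (do not retry) always overrides.
 */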
static void
test_nvme_completion_is_retry(void)
{
	struct spdk_nvme_cpl	cpl = {};

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_FORMAT_IN_PROGRESS;
	cpl.status.dnr = 1;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_FIELD;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_COMMAND_ID_CONFLICT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_DATA_TRANSFER_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_POWER_LOSS;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_SGL_SEG_DESCRIPTOR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

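	/* "DESCIRPTORS" is spelled this way in the SPDK spec headers; keep it as-is. */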
	cpl.status.sc = SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_METADATA_SGL_LENGTH_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_CONTROLLER_MEM_BUF;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_PRP_OFFSET;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_CAPACITY_EXCEEDED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_RESERVATION_CONFLICT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = 0x70;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_PATH;
	cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_PATH;
	cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	cpl.status.dnr = 1;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = 0x4;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
}

#ifdef DEBUG
static void
test_get_status_string(void)
{
	const char	*status_string;
	struct spdk_nvme_status status;

	status.sct = SPDK_NVME_SCT_GENERIC;
	status.sc = SPDK_NVME_SC_SUCCESS;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "SUCCESS") == 0);

	status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	status.sc = SPDK_NVME_SC_COMPLETION_QUEUE_INVALID;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "INVALID COMPLETION QUEUE") == 0);

	status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
	status.sc = SPDK_NVME_SC_UNRECOVERED_READ_ERROR;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "UNRECOVERED READ ERROR") == 0);

	status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	status.sc = 0;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "VENDOR SPECIFIC") == 0);

	status.sct = 0x4;
	status.sc = 0;
	status_string = spdk_nvme_cpl_get_status_string(&status);
	CU_ASSERT(strcmp(status_string, "RESERVED") == 0);
}
#endif

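/* The error-injection API takes ctrlr_lock, so set it up as a recursive mutex
 * the way the driver does.
 */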
static void
test_nvme_qpair_add_cmd_error_injection(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	pthread_mutexattr_t attr;
	int rc;

	prepare_submit_request_test(&qpair, &ctrlr);
	ctrlr.adminq = &qpair;

	SPDK_CU_ASSERT_FATAL(pthread_mutexattr_init(&attr) == 0);
	SPDK_CU_ASSERT_FATAL(pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE) == 0);
	SPDK_CU_ASSERT_FATAL(pthread_mutex_init(&ctrlr.ctrlr_lock, &attr) == 0);
	pthread_mutexattr_destroy(&attr);

	/* Admin error injection at submission path */
	MOCK_CLEAR(spdk_zmalloc);
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, NULL,
			SPDK_NVME_OPC_GET_FEATURES, true, 5000, 1,
			SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_INVALID_FIELD);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, NULL, SPDK_NVME_OPC_GET_FEATURES);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	/* IO error injection at completion path */
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_READ, false, 0, 1,
			SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Provide the same opc and check that no new entry is allocated */
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_READ, false, 0, 1,
			SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&qpair.err_cmd_head));
	CU_ASSERT(TAILQ_NEXT(TAILQ_FIRST(&qpair.err_cmd_head), link) == NULL);

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_READ);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_COMPARE, true, 0, 5,
			SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_COMPARE_FAILURE);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_COMPARE);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	pthread_mutex_destroy(&ctrlr.ctrlr_lock);
	cleanup_submit_request_test(&qpair);
}

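/* Build the kind of parent/child request tree the driver creates when it
 * splits a large I/O.
 */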
static struct nvme_request *
allocate_request_tree(struct spdk_nvme_qpair *qpair)
{
	struct nvme_request	*req, *req1, *req2, *req3, *req2_1, *req2_2, *req2_3;

	/*
	 *  Build a request chain like the following:
	 *            req
	 *             |
	 *      ---------------
	 *     |       |       |
	 *    req1    req2    req3
	 *             |
	 *      ---------------
	 *     |       |       |
	 *   req2_1  req2_2  req2_3
	 */
	req = nvme_allocate_request_null(qpair, NULL, NULL);
	CU_ASSERT(req != NULL);
	TAILQ_INIT(&req->children);

	req1 = nvme_allocate_request_null(qpair, NULL, NULL);
	CU_ASSERT(req1 != NULL);
	req->num_children++;
	TAILQ_INSERT_TAIL(&req->children, req1, child_tailq);
	req1->parent = req;

	req2 = nvme_allocate_request_null(qpair, NULL, NULL);
	CU_ASSERT(req2 != NULL);
	TAILQ_INIT(&req2->children);
	req->num_children++;
	TAILQ_INSERT_TAIL(&req->children, req2, child_tailq);
	req2->parent = req;

	req3 = nvme_allocate_request_null(qpair, NULL, NULL);
	CU_ASSERT(req3 != NULL);
	req->num_children++;
	TAILQ_INSERT_TAIL(&req->children, req3, child_tailq);
	req3->parent = req;

	req2_1 = nvme_allocate_request_null(qpair, NULL, NULL);
	CU_ASSERT(req2_1 != NULL);
	req2->num_children++;
	TAILQ_INSERT_TAIL(&req2->children, req2_1, child_tailq);
	req2_1->parent = req2;

	req2_2 = nvme_allocate_request_null(qpair, NULL, NULL);
	CU_ASSERT(req2_2 != NULL);
	req2->num_children++;
	TAILQ_INSERT_TAIL(&req2->children, req2_2, child_tailq);
	req2_2->parent = req2;

	req2_3 = nvme_allocate_request_null(qpair, NULL, NULL);
	CU_ASSERT(req2_3 != NULL);
	req2->num_children++;
	TAILQ_INSERT_TAIL(&req2->children, req2_3, child_tailq);
	req2_3->parent = req2;

	return req;
}

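/* Submitting a request tree to a failed controller or a disconnecting qpair
 * must fail the whole tree with -ENXIO.
 */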
static void
test_nvme_qpair_submit_request(void)
{
	int				rc;
	struct spdk_nvme_qpair		qpair = {};
	struct spdk_nvme_ctrlr		ctrlr = {};
	struct nvme_request		*req;

	prepare_submit_request_test(&qpair, &ctrlr);

	req = allocate_request_tree(&qpair);
	ctrlr.is_failed = true;
	rc = nvme_qpair_submit_request(&qpair, req);
	SPDK_CU_ASSERT_FATAL(rc == -ENXIO);

	req = allocate_request_tree(&qpair);
	ctrlr.is_failed = false;
	qpair.state = NVME_QPAIR_DISCONNECTING;
	rc = nvme_qpair_submit_request(&qpair, req);
	SPDK_CU_ASSERT_FATAL(rc == -ENXIO);

	cleanup_submit_request_test(&qpair);
}

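/* A queued request whose resubmission fails at the transport layer must be
 * completed with an error from within process_completions, not silently dropped.
 */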
static void
test_nvme_qpair_resubmit_request_with_transport_failed(void)
{
	int				rc;
	struct spdk_nvme_qpair		qpair = {};
	struct spdk_nvme_ctrlr		ctrlr = {};
	struct nvme_request		*req;

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_null(&qpair, dummy_cb_fn, NULL);
	CU_ASSERT(req != NULL);
	TAILQ_INIT(&req->children);

	STAILQ_INSERT_TAIL(&qpair.queued_req, req, stailq);
	req->queued = true;

	g_transport_process_completions_rc = 1;
	qpair.state = NVME_QPAIR_ENABLED;
	g_num_cb_failed = 0;
	MOCK_SET(nvme_transport_qpair_submit_request, -EINVAL);
	rc = spdk_nvme_qpair_process_completions(&qpair, g_transport_process_completions_rc);
	MOCK_CLEAR(nvme_transport_qpair_submit_request);
	CU_ASSERT(rc == g_transport_process_completions_rc);
	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(g_num_cb_failed == 1);

	cleanup_submit_request_test(&qpair);
}

static void
ut_spdk_nvme_cmd_cb(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(cb_arg == (void *)0xDEADBEEF);
	CU_ASSERT(cpl->sqid == 1);
	CU_ASSERT(cpl->status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(cpl->status.sc == SPDK_NVME_SC_SUCCESS);
	CU_ASSERT(cpl->status.dnr == 1);
}

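/* Manual completion must invoke the callback, return the request to free_req,
 * and decrement the outstanding count.
 */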
static void
test_nvme_qpair_manual_complete_request(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request req = {};
	struct spdk_nvme_ctrlr ctrlr = {};

	qpair.ctrlr = &ctrlr;
	qpair.id = 1;
	req.cb_fn = ut_spdk_nvme_cmd_cb;
	req.cb_arg = (void *) 0xDEADBEEF;
	req.qpair = &qpair;
	req.num_children = 0;
	qpair.ctrlr->opts.disable_error_logging = false;
	STAILQ_INIT(&qpair.free_req);
	SPDK_CU_ASSERT_FATAL(STAILQ_EMPTY(&qpair.free_req));
	qpair.num_outstanding_reqs = 1;

	nvme_qpair_manual_complete_request(&qpair, &req, SPDK_NVME_SCT_GENERIC,
					   SPDK_NVME_SC_SUCCESS, 1, true);
	CU_ASSERT(!STAILQ_EMPTY(&qpair.free_req));
	CU_ASSERT(qpair.num_outstanding_reqs == 0);
}

static void
ut_spdk_nvme_cmd_cb_empty(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{

}

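/* init/deinit round trip: deinit must drain every internal queue and free any
 * error-injection entries still attached to the qpair.
 */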
static void
test_nvme_qpair_init_deinit(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request *reqs[3] = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_error_cmd *cmd = NULL;
	struct nvme_request *var_req = NULL;
	int rc, i = 0;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;

	rc = nvme_qpair_init(&qpair, 1, &ctrlr, SPDK_NVME_QPRIO_HIGH, 3, false);
	CU_ASSERT(rc == 0);
	CU_ASSERT(qpair.id == 1);
	CU_ASSERT(qpair.qprio == SPDK_NVME_QPRIO_HIGH);
	CU_ASSERT(qpair.in_completion_context == 0);
	CU_ASSERT(qpair.delete_after_completion_context == 0);
	CU_ASSERT(qpair.no_deletion_notification_needed == 0);
	CU_ASSERT(qpair.ctrlr == &ctrlr);
	CU_ASSERT(qpair.trtype == SPDK_NVME_TRANSPORT_PCIE);
	CU_ASSERT(qpair.req_buf != NULL);

	SPDK_CU_ASSERT_FATAL(!STAILQ_EMPTY(&qpair.free_req));
	STAILQ_FOREACH(var_req, &qpair.free_req, stailq) {
		/* Check request address alignment */
		CU_ASSERT((uint64_t)var_req % 64 == 0);
		CU_ASSERT(var_req->qpair == &qpair);
		var_req->pid = getpid();
		reqs[i++] = var_req;
	}
	CU_ASSERT(i == 3);

	/* Allocate an error-injection entry that deinit must free */
	cmd = spdk_zmalloc(sizeof(*cmd), 64, NULL, SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_SHARE);
	SPDK_CU_ASSERT_FATAL(cmd != NULL);
	TAILQ_INSERT_TAIL(&qpair.err_cmd_head, cmd, link);
	for (int i = 0; i < 3; i++) {
		reqs[i]->cb_fn = ut_spdk_nvme_cmd_cb_empty;
		reqs[i]->cb_arg = (void *) 0xDEADBEEF;
		reqs[i]->num_children = 0;
	}

	/* Distribute the requests across the various internal queues */
	STAILQ_REMOVE(&qpair.free_req, reqs[0], nvme_request, stailq);
	STAILQ_INSERT_TAIL(&qpair.queued_req, reqs[0], stailq);
	STAILQ_REMOVE(&qpair.free_req, reqs[1], nvme_request, stailq);
	STAILQ_INSERT_TAIL(&qpair.aborting_queued_req, reqs[1], stailq);
	STAILQ_REMOVE(&qpair.free_req, reqs[2], nvme_request, stailq);
	STAILQ_INSERT_TAIL(&qpair.err_req_head, reqs[2], stailq);
	CU_ASSERT(STAILQ_EMPTY(&qpair.free_req));
	qpair.num_outstanding_reqs = 3;

	nvme_qpair_deinit(&qpair);
	CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
	CU_ASSERT(STAILQ_EMPTY(&qpair.aborting_queued_req));
	CU_ASSERT(STAILQ_EMPTY(&qpair.err_req_head));
	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
	CU_ASSERT(qpair.num_outstanding_reqs == 0);
}

static void
test_nvme_get_sgl_print_info(void)
{
	char buf[NVME_CMD_DPTR_STR_SIZE] = {};
	struct spdk_nvme_cmd cmd = {};

	cmd.dptr.sgl1.keyed.length = 0x1000;
	cmd.dptr.sgl1.keyed.key = 0xababccdd;

	nvme_get_sgl_keyed(buf, NVME_CMD_DPTR_STR_SIZE, &cmd);
	CU_ASSERT(!strncmp(buf, " len:0x1000 key:0xababccdd", NVME_CMD_DPTR_STR_SIZE));

	memset(&cmd.dptr.sgl1, 0, sizeof(cmd.dptr.sgl1));
	cmd.dptr.sgl1.unkeyed.length = 0x1000;

	nvme_get_sgl_unkeyed(buf, NVME_CMD_DPTR_STR_SIZE, &cmd);
	CU_ASSERT(!strncmp(buf, " len:0x1000", NVME_CMD_DPTR_STR_SIZE));

	memset(&cmd.dptr.sgl1, 0, sizeof(cmd.dptr.sgl1));
	cmd.dptr.sgl1.generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	cmd.dptr.sgl1.generic.subtype = 0;
	cmd.dptr.sgl1.address = 0xdeadbeef;
	cmd.dptr.sgl1.unkeyed.length = 0x1000;

	nvme_get_sgl(buf, NVME_CMD_DPTR_STR_SIZE, &cmd);
	CU_ASSERT(!strncmp(buf, "SGL DATA BLOCK ADDRESS 0xdeadbeef len:0x1000",
			   NVME_CMD_DPTR_STR_SIZE));

	memset(&cmd.dptr.sgl1, 0, sizeof(cmd.dptr.sgl1));
	cmd.dptr.sgl1.generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	cmd.dptr.sgl1.generic.subtype = 0;
	cmd.dptr.sgl1.address = 0xdeadbeef;
	cmd.dptr.sgl1.keyed.length = 0x1000;
	cmd.dptr.sgl1.keyed.key = 0xababccdd;

	nvme_get_sgl(buf, NVME_CMD_DPTR_STR_SIZE, &cmd);
	CU_ASSERT(!strncmp(buf, "SGL KEYED DATA BLOCK ADDRESS 0xdeadbeef len:0x1000 key:0xababccdd",
			   NVME_CMD_DPTR_STR_SIZE));
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvme_qpair", NULL, NULL);

	CU_ADD_TEST(suite, test3);
	CU_ADD_TEST(suite, test_ctrlr_failed);
	CU_ADD_TEST(suite, struct_packing);
	CU_ADD_TEST(suite, test_nvme_qpair_process_completions);
	CU_ADD_TEST(suite, test_nvme_completion_is_retry);
#ifdef DEBUG
	CU_ADD_TEST(suite, test_get_status_string);
#endif
	CU_ADD_TEST(suite, test_nvme_qpair_add_cmd_error_injection);
	CU_ADD_TEST(suite, test_nvme_qpair_submit_request);
	CU_ADD_TEST(suite, test_nvme_qpair_resubmit_request_with_transport_failed);
	CU_ADD_TEST(suite, test_nvme_qpair_manual_complete_request);
	CU_ADD_TEST(suite, test_nvme_qpair_init_deinit);
	CU_ADD_TEST(suite, test_nvme_get_sgl_print_info);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}