xref: /spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c (revision 407e88fd2ab020d753e33014cf759353a9901b51)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk_cunit.h"
37 
38 #include "common/lib/test_env.c"
39 
40 pid_t g_spdk_nvme_pid;
41 
42 bool trace_flag = false;
43 #define SPDK_LOG_NVME trace_flag
44 
45 #include "nvme/nvme_qpair.c"
46 
/* Global driver instance referenced by the nvme_qpair.c code under test;
 * only the lock needs to be valid for these tests.
 */
struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};
50 
/* Stub: the transport layer is not exercised by these tests, so aborting
 * outstanding transport requests is a no-op.
 */
void
nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
{
}
55 
/* Stub: pretend every request is accepted by the transport.
 * Returning 0 lets the qpair-level submission logic run to completion.
 */
int
nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	/* TODO: track submitted requests if future tests need to inspect them. */
	return 0;
}
62 
/* Stub: report that no completions were reaped from the transport. */
int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	/* TODO: synthesize completions if future tests need them. */
	return 0;
}
69 
/* Stub: qpairs in these tests are stack-allocated, so there is nothing to free. */
int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	return 0;
}
75 
76 static void
77 prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
78 			    struct spdk_nvme_ctrlr *ctrlr)
79 {
80 	memset(ctrlr, 0, sizeof(*ctrlr));
81 	ctrlr->free_io_qids = NULL;
82 	TAILQ_INIT(&ctrlr->active_io_qpairs);
83 	TAILQ_INIT(&ctrlr->active_procs);
84 	MOCK_CLEAR(spdk_zmalloc);
85 	nvme_qpair_init(qpair, 1, ctrlr, 0, 32);
86 }
87 
88 static void
89 cleanup_submit_request_test(struct spdk_nvme_qpair *qpair)
90 {
91 	free(qpair->req_buf);
92 }
93 
/* Completion callback for requests the test expects to succeed. */
static void
expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
}
99 
/* Completion callback for requests the test expects to fail. */
static void
expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
}
105 
106 static void
107 test3(void)
108 {
109 	struct spdk_nvme_qpair		qpair = {};
110 	struct nvme_request		*req;
111 	struct spdk_nvme_ctrlr		ctrlr = {};
112 
113 	prepare_submit_request_test(&qpair, &ctrlr);
114 
115 	req = nvme_allocate_request_null(&qpair, expected_success_callback, NULL);
116 	SPDK_CU_ASSERT_FATAL(req != NULL);
117 
118 	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);
119 
120 	nvme_free_request(req);
121 
122 	cleanup_submit_request_test(&qpair);
123 }
124 
125 static void
126 test_ctrlr_failed(void)
127 {
128 	struct spdk_nvme_qpair		qpair = {};
129 	struct nvme_request		*req;
130 	struct spdk_nvme_ctrlr		ctrlr = {};
131 	char				payload[4096];
132 
133 	prepare_submit_request_test(&qpair, &ctrlr);
134 
135 	req = nvme_allocate_request_contig(&qpair, payload, sizeof(payload), expected_failure_callback,
136 					   NULL);
137 	SPDK_CU_ASSERT_FATAL(req != NULL);
138 
139 	/* Set the controller to failed.
140 	 * Set the controller to resetting so that the qpair won't get re-enabled.
141 	 */
142 	ctrlr.is_failed = true;
143 	ctrlr.is_resetting = true;
144 
145 	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
146 
147 	cleanup_submit_request_test(&qpair);
148 }
149 
static void struct_packing(void)
{
	/* ctrlr is the first field in nvme_qpair after the fields
	 * that are used in the I/O path. Make sure the I/O path fields
	 * all fit into two cache lines (2 * 64 = 128 bytes).
	 */
	CU_ASSERT(offsetof(struct spdk_nvme_qpair, ctrlr) <= 128);
}
158 
159 static void test_nvme_qpair_process_completions(void)
160 {
161 	struct spdk_nvme_qpair		qpair = {};
162 	struct spdk_nvme_ctrlr		ctrlr = {};
163 
164 	prepare_submit_request_test(&qpair, &ctrlr);
165 	qpair.ctrlr->is_resetting = true;
166 
167 	spdk_nvme_qpair_process_completions(&qpair, 0);
168 	cleanup_submit_request_test(&qpair);
169 }
170 
/* Exercise nvme_completion_is_retry() across status-code-type (SCT) and
 * status-code (SC) combinations. Note that cpl fields deliberately carry
 * over from one step to the next; the order of assignments matters.
 */
static void test_nvme_completion_is_retry(void)
{
	struct spdk_nvme_cpl	cpl = {};

	/* Generic / NAMESPACE_NOT_READY with DNR clear is transient -> retry. */
	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	/* FORMAT_IN_PROGRESS retries only when DNR (Do Not Retry) is clear. */
	cpl.status.sc = SPDK_NVME_SC_FORMAT_IN_PROGRESS;
	cpl.status.dnr = 1;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	/* All remaining generic status codes are hard failures -> no retry. */
	cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_FIELD;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_COMMAND_ID_CONFLICT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_DATA_TRANSFER_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_POWER_LOSS;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_SGL_SEG_DESCRIPTOR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	/* NOTE: "DESCIRPTORS" is the (misspelled) upstream SPDK constant name. */
	cpl.status.sc = SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_METADATA_SGL_LENGTH_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_CONTROLLER_MEM_BUF;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_INVALID_PRP_OFFSET;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_CAPACITY_EXCEEDED;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sc = SPDK_NVME_SC_RESERVATION_CONFLICT;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	/* An unknown/reserved generic status code -> no retry. */
	cpl.status.sc = 0x70;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	/* Command-specific and media-error SCTs are never retried. */
	cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	/* Path errors: INTERNAL_PATH_ERROR retries only when DNR is clear. */
	cpl.status.sct = SPDK_NVME_SCT_PATH;
	cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	cpl.status.dnr = 0;
	CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = SPDK_NVME_SCT_PATH;
	cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	cpl.status.dnr = 1;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	/* Vendor-specific and reserved SCT values -> no retry. */
	cpl.status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));

	cpl.status.sct = 0x4;
	CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
}
277 
278 #ifdef DEBUG
279 static void
280 test_get_status_string(void)
281 {
282 	const char	*status_string;
283 	struct spdk_nvme_status status;
284 
285 	status.sct = SPDK_NVME_SCT_GENERIC;
286 	status.sc = SPDK_NVME_SC_SUCCESS;
287 	status_string = spdk_nvme_cpl_get_status_string(&status);
288 	CU_ASSERT(strcmp(status_string, "SUCCESS") == 0);
289 
290 	status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
291 	status.sc = SPDK_NVME_SC_COMPLETION_QUEUE_INVALID;
292 	status_string = spdk_nvme_cpl_get_status_string(&status);
293 	CU_ASSERT(strcmp(status_string, "INVALID COMPLETION QUEUE") == 0);
294 
295 	status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
296 	status.sc = SPDK_NVME_SC_UNRECOVERED_READ_ERROR;
297 	status_string = spdk_nvme_cpl_get_status_string(&status);
298 	CU_ASSERT(strcmp(status_string, "UNRECOVERED READ ERROR") == 0);
299 
300 	status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
301 	status.sc = 0;
302 	status_string = spdk_nvme_cpl_get_status_string(&status);
303 	CU_ASSERT(strcmp(status_string, "VENDOR SPECIFIC") == 0);
304 
305 	status.sct = 0x4;
306 	status.sc = 0;
307 	status_string = spdk_nvme_cpl_get_status_string(&status);
308 	CU_ASSERT(strcmp(status_string, "RESERVED") == 0);
309 }
310 #endif
311 
/* Exercise spdk_nvme_qpair_add_cmd_error_injection() /
 * spdk_nvme_qpair_remove_cmd_error_injection() on both the admin queue
 * (qpair == NULL) and an I/O queue, verifying the err_cmd_head list state
 * after each add/remove.
 */
static void
test_nvme_qpair_add_cmd_error_injection(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	prepare_submit_request_test(&qpair, &ctrlr);
	/* Passing qpair == NULL below targets the admin queue, so point it here. */
	ctrlr.adminq = &qpair;

	/* Admin error injection at submission path */
	MOCK_CLEAR(spdk_zmalloc);
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, NULL,
			SPDK_NVME_OPC_GET_FEATURES, true, 5000, 1,
			SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_INVALID_FIELD);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, NULL, SPDK_NVME_OPC_GET_FEATURES);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	/* IO error injection at completion path */
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_READ, false, 0, 1,
			SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Adding the same opc again must reuse the existing entry, not
	 * allocate a second one -- hence exactly one element on the list.
	 */
	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_READ, false, 0, 1,
			SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);

	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&qpair.err_cmd_head));
	CU_ASSERT(TAILQ_NEXT(TAILQ_FIRST(&qpair.err_cmd_head), link) == NULL);

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_READ);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
			SPDK_NVME_OPC_COMPARE, true, 0, 5,
			SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_COMPARE_FAILURE);

	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));

	/* Remove cmd error injection */
	spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_COMPARE);

	CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));

	cleanup_submit_request_test(&qpair);
}
372 
373 static void
374 test_nvme_qpair_submit_request(void)
375 {
376 	int				rc;
377 	struct spdk_nvme_qpair		qpair = {};
378 	struct spdk_nvme_ctrlr		ctrlr = {};
379 	struct nvme_request		*req, *req1, *req2, *req3, *req2_1, *req2_2, *req2_3;
380 
381 	prepare_submit_request_test(&qpair, &ctrlr);
382 
383 	/*
384 	 *  Build a request chain like the following:
385 	 *            req
386 	 *             |
387 	 *      ---------------
388 	 *     |       |       |
389 	 *    req1    req2    req3
390 	 *             |
391 	 *      ---------------
392 	 *     |       |       |
393 	 *   req2_1  req2_2  req2_3
394 	 */
395 	req = nvme_allocate_request_null(&qpair, NULL, NULL);
396 	CU_ASSERT(req != NULL);
397 	TAILQ_INIT(&req->children);
398 
399 	req1 = nvme_allocate_request_null(&qpair, NULL, NULL);
400 	CU_ASSERT(req1 != NULL);
401 	req->num_children++;
402 	TAILQ_INSERT_TAIL(&req->children, req1, child_tailq);
403 	req1->parent = req;
404 
405 	req2 = nvme_allocate_request_null(&qpair, NULL, NULL);
406 	CU_ASSERT(req2 != NULL);
407 	TAILQ_INIT(&req2->children);
408 	req->num_children++;
409 	TAILQ_INSERT_TAIL(&req->children, req2, child_tailq);
410 	req2->parent = req;
411 
412 	req3 = nvme_allocate_request_null(&qpair, NULL, NULL);
413 	CU_ASSERT(req3 != NULL);
414 	req->num_children++;
415 	TAILQ_INSERT_TAIL(&req->children, req3, child_tailq);
416 	req3->parent = req;
417 
418 	req2_1 = nvme_allocate_request_null(&qpair, NULL, NULL);
419 	CU_ASSERT(req2_1 != NULL);
420 	req2->num_children++;
421 	TAILQ_INSERT_TAIL(&req2->children, req2_1, child_tailq);
422 	req2_1->parent = req2;
423 
424 	req2_2 = nvme_allocate_request_null(&qpair, NULL, NULL);
425 	CU_ASSERT(req2_2 != NULL);
426 	req2->num_children++;
427 	TAILQ_INSERT_TAIL(&req2->children, req2_2, child_tailq);
428 	req2_2->parent = req2;
429 
430 	req2_3 = nvme_allocate_request_null(&qpair, NULL, NULL);
431 	CU_ASSERT(req2_3 != NULL);
432 	req2->num_children++;
433 	TAILQ_INSERT_TAIL(&req2->children, req2_3, child_tailq);
434 	req2_3->parent = req2;
435 
436 	ctrlr.is_failed = true;
437 	rc = nvme_qpair_submit_request(&qpair, req);
438 	SPDK_CU_ASSERT_FATAL(rc == -ENXIO);
439 
440 	cleanup_submit_request_test(&qpair);
441 }
442 
/* Register and run every test in the "nvme_qpair" suite; the process exit
 * status is the number of failed CUnit assertions (0 on full success).
 */
int main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvme_qpair", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	/* Register all tests; bail out if any single registration fails.
	 * get_status_string is only built when DEBUG is defined because
	 * spdk_nvme_cpl_get_status_string() is a debug-only helper.
	 */
	if (CU_add_test(suite, "test3", test3) == NULL
	    || CU_add_test(suite, "ctrlr_failed", test_ctrlr_failed) == NULL
	    || CU_add_test(suite, "struct_packing", struct_packing) == NULL
	    || CU_add_test(suite, "spdk_nvme_qpair_process_completions",
			   test_nvme_qpair_process_completions) == NULL
	    || CU_add_test(suite, "nvme_completion_is_retry", test_nvme_completion_is_retry) == NULL
#ifdef DEBUG
	    || CU_add_test(suite, "get_status_string", test_get_status_string) == NULL
#endif
	    || CU_add_test(suite, "spdk_nvme_qpair_add_cmd_error_injection",
			   test_nvme_qpair_add_cmd_error_injection) == NULL
	    || CU_add_test(suite, "spdk_nvme_qpair_submit_request",
			   test_nvme_qpair_submit_request) == NULL
	   ) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}
482