/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2015 Intel Corporation. All rights reserved.
 *   Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
 */

#include "spdk_internal/cunit.h"

#include "spdk/env.h"

#include "nvme/nvme.c"

#include "spdk_internal/mock.h"

#include "common/lib/test_env.c"

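/*
 * Stub out the controller and transport helpers that nvme.c calls into.
 * DEFINE_STUB()/DEFINE_STUB_V() generate default mock implementations whose
 * return values individual test cases override with MOCK_SET()/MOCK_CLEAR().
 */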
DEFINE_STUB_V(nvme_ctrlr_proc_get_ref, (struct spdk_nvme_ctrlr *ctrlr));
DEFINE_STUB_V(nvme_ctrlr_proc_put_ref, (struct spdk_nvme_ctrlr *ctrlr));
DEFINE_STUB_V(nvme_ctrlr_fail, (struct spdk_nvme_ctrlr *ctrlr, bool hotremove));
DEFINE_STUB(spdk_nvme_transport_available_by_name, bool,
	    (const char *transport_name), true);
/* return anything non-NULL, this won't be dereferenced anywhere in this test */
DEFINE_STUB(nvme_ctrlr_get_current_process, struct spdk_nvme_ctrlr_process *,
	    (struct spdk_nvme_ctrlr *ctrlr), (struct spdk_nvme_ctrlr_process *)(uintptr_t)0x1);
DEFINE_STUB(nvme_ctrlr_process_init, int,
	    (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(nvme_ctrlr_get_ref_count, int,
	    (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(dummy_probe_cb, bool,
	    (void *cb_ctx, const struct spdk_nvme_transport_id *trid,
	     struct spdk_nvme_ctrlr_opts *opts), false);
DEFINE_STUB(nvme_transport_ctrlr_construct, struct spdk_nvme_ctrlr *,
	    (const struct spdk_nvme_transport_id *trid,
	     const struct spdk_nvme_ctrlr_opts *opts,
	     void *devhandle), NULL);
DEFINE_STUB_V(nvme_io_msg_ctrlr_detach, (struct spdk_nvme_ctrlr *ctrlr));
DEFINE_STUB(spdk_nvme_transport_available, bool,
	    (enum spdk_nvme_transport_type trtype), true);
DEFINE_STUB(spdk_pci_event_listen, int, (void), 0);
DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);
DEFINE_STUB(nvme_transport_ctrlr_scan_attached, int, (struct spdk_nvme_probe_ctx *probe_ctx), 0);

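/*
 * Local overrides of the controller destruct path: they only record that
 * destruct was requested and immediately mark shutdown as complete, so the
 * detach tests can finish synchronously.
 */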
static bool ut_destruct_called = false;
void
nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	ut_destruct_called = true;
}

void
nvme_ctrlr_destruct_async(struct spdk_nvme_ctrlr *ctrlr, struct nvme_ctrlr_detach_ctx *ctx)
{
	ut_destruct_called = true;
	ctrlr->is_destructed = true;

	ctx->shutdown_complete = true;
}

int
nvme_ctrlr_destruct_poll_async(struct spdk_nvme_ctrlr *ctrlr,
			       struct nvme_ctrlr_detach_ctx *ctx)
{
	if (!ctx->shutdown_complete) {
		return -EAGAIN;
	}

	if (ctx->cb_fn) {
		ctx->cb_fn(ctrlr);
	}

	return 0;
}

union spdk_nvme_csts_register
	spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts = {};
	return csts;
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);
	nvme_get_default_hostnqn(opts->hostnqn, sizeof(opts->hostnqn));
	opts->opts_size = opts_size;
}

static void
memset_trid(struct spdk_nvme_transport_id *trid1, struct spdk_nvme_transport_id *trid2)
{
	memset(trid1, 0, sizeof(struct spdk_nvme_transport_id));
	memset(trid2, 0, sizeof(struct spdk_nvme_transport_id));
}

static bool ut_check_trtype = false;
static bool ut_test_probe_internal = false;

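/*
 * Scan helper used when ut_test_probe_internal is set.  It drives
 * nvme_ctrlr_probe() directly, once with a successfully constructed
 * controller and once with a failed construction, then returns an error.
 */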
static int
ut_nvme_pcie_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
			bool direct_connect)
{
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_qpair qpair = {};
	int rc;

	if (probe_ctx->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
		return -1;
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	CU_ASSERT(ctrlr != NULL);
	ctrlr->adminq = &qpair;

	/* happy path with first controller */
	MOCK_SET(nvme_transport_ctrlr_construct, ctrlr);
	rc = nvme_ctrlr_probe(&probe_ctx->trid, probe_ctx, NULL);
	CU_ASSERT(rc == 0);

	/* failed with the second controller */
	MOCK_SET(nvme_transport_ctrlr_construct, NULL);
	rc = nvme_ctrlr_probe(&probe_ctx->trid, probe_ctx, NULL);
	CU_ASSERT(rc != 0);
	MOCK_CLEAR_P(nvme_transport_ctrlr_construct);

	return -1;
}

int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	free(ctrlr);
	return 0;
}

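/*
 * Test override of the transport scan.  It optionally validates the trtype,
 * optionally redirects to the PCIe scan helper above, and for direct connects
 * invokes the probe callback for an already attached controller.
 */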
int
nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
			  bool direct_connect)
{
	struct spdk_nvme_ctrlr *ctrlr = NULL;

	if (ut_check_trtype == true) {
		CU_ASSERT(probe_ctx->trid.trtype == SPDK_NVME_TRANSPORT_PCIE);
	}

	if (ut_test_probe_internal) {
		return ut_nvme_pcie_ctrlr_scan(probe_ctx, direct_connect);
	}

	if (direct_connect == true && probe_ctx->probe_cb) {
		nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
		ctrlr = nvme_get_ctrlr_by_trid(&probe_ctx->trid, NULL);
		nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
		probe_ctx->probe_cb(probe_ctx->cb_ctx, &probe_ctx->trid, &ctrlr->opts);
	}
	return 0;
}

static bool ut_attach_cb_called = false;
static void
dummy_attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
		struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
{
	ut_attach_cb_called = true;
}

static int ut_attach_fail_cb_rc = 0;
static void
dummy_attach_fail_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid, int rc)
{
	ut_attach_fail_cb_rc = rc;
}

static void
test_spdk_nvme_probe_ext(void)
{
	int rc = 0;
	const struct spdk_nvme_transport_id *trid = NULL;
	void *cb_ctx = NULL;
	spdk_nvme_probe_cb probe_cb = NULL;
	spdk_nvme_attach_cb attach_cb = dummy_attach_cb;
	spdk_nvme_attach_fail_cb attach_fail_cb = dummy_attach_fail_cb;
	spdk_nvme_remove_cb remove_cb = NULL;
	struct spdk_nvme_ctrlr ctrlr;
	pthread_mutexattr_t attr;
	struct nvme_driver dummy = {};
	g_spdk_nvme_driver = &dummy;

	/* driver init fails */
	MOCK_SET(spdk_process_is_primary, false);
	MOCK_SET(spdk_memzone_lookup, NULL);
	rc = spdk_nvme_probe_ext(trid, cb_ctx, probe_cb, attach_cb, attach_fail_cb, remove_cb);
	CU_ASSERT(rc == -1);

	/*
	 * For secondary processes, the attach_cb should automatically get
	 * called for any controllers already initialized by the primary
	 * process.
	 */
	MOCK_SET(spdk_nvme_transport_available_by_name, false);
	MOCK_SET(spdk_process_is_primary, true);
	dummy.initialized = true;
	g_spdk_nvme_driver = &dummy;
	rc = spdk_nvme_probe_ext(trid, cb_ctx, probe_cb, attach_cb, attach_fail_cb, remove_cb);
	CU_ASSERT(rc == -1);

	/* driver init passes, transport available, secondary process calls attach_cb */
	MOCK_SET(spdk_nvme_transport_available_by_name, true);
	MOCK_SET(spdk_process_is_primary, false);
	MOCK_SET(spdk_memzone_lookup, g_spdk_nvme_driver);
	dummy.initialized = true;
	memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
	CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
	CU_ASSERT(pthread_mutex_init(&dummy.lock, &attr) == 0);
	TAILQ_INIT(&dummy.shared_attached_ctrlrs);
	TAILQ_INSERT_TAIL(&dummy.shared_attached_ctrlrs, &ctrlr, tailq);
	ut_attach_cb_called = false;
	/* setup nvme_transport_ctrlr_scan() stub to also check the trtype */
	ut_check_trtype = true;
	rc = spdk_nvme_probe_ext(trid, cb_ctx, probe_cb, attach_cb, attach_fail_cb, remove_cb);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ut_attach_cb_called == true);

	/* driver init passes, transport available, we are primary */
	MOCK_SET(spdk_process_is_primary, true);
	rc = spdk_nvme_probe_ext(trid, cb_ctx, probe_cb, attach_cb, attach_fail_cb, remove_cb);
	CU_ASSERT(rc == 0);

	g_spdk_nvme_driver = NULL;
	/* reset to pre-test values */
	MOCK_CLEAR(spdk_memzone_lookup);
	ut_check_trtype = false;

	pthread_mutex_destroy(&dummy.lock);
	pthread_mutexattr_destroy(&attr);
}

static void
test_spdk_nvme_connect(void)
{
	struct spdk_nvme_ctrlr *ret_ctrlr = NULL;
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr_opts opts = {};
	struct spdk_nvme_ctrlr ctrlr;
	pthread_mutexattr_t attr;
	struct nvme_driver dummy = {};

	/* initialize the variable to prepare the test */
	dummy.initialized = true;
	TAILQ_INIT(&dummy.shared_attached_ctrlrs);
	g_spdk_nvme_driver = &dummy;
	CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
	CU_ASSERT(pthread_mutex_init(&g_spdk_nvme_driver->lock, &attr) == 0);

	/* set NULL trid pointer to test immediate return */
	ret_ctrlr = spdk_nvme_connect(NULL, NULL, 0);
	CU_ASSERT(ret_ctrlr == NULL);

	/* driver init passes, transport available, secondary process connects ctrlr */
	MOCK_SET(spdk_process_is_primary, false);
	MOCK_SET(spdk_memzone_lookup, g_spdk_nvme_driver);
	MOCK_SET(spdk_nvme_transport_available_by_name, true);
	memset(&trid, 0, sizeof(trid));
	trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
	CU_ASSERT(ret_ctrlr == NULL);

	/* driver init passes, setup one ctrlr on the attached_list */
	memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr.opts, sizeof(ctrlr.opts));
	snprintf(ctrlr.trid.traddr, sizeof(ctrlr.trid.traddr), "0000:01:00.0");
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &ctrlr, tailq);
	/* get the ctrlr from the attached list */
	snprintf(trid.traddr, sizeof(trid.traddr), "0000:01:00.0");
	ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
	CU_ASSERT(ret_ctrlr == &ctrlr);
	/* get the ctrlr from the attached list with default ctrlr opts */
	ctrlr.opts.num_io_queues = DEFAULT_MAX_IO_QUEUES;
	ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
	CU_ASSERT(ret_ctrlr == &ctrlr);
	CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
	/* get the ctrlr from the attached list with default ctrlr opts and consistent opts_size */
	spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
	opts.num_io_queues = 1;
	ret_ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
	CU_ASSERT(ret_ctrlr == &ctrlr);
	CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, 1);
	CU_ASSERT_EQUAL(ret_ctrlr->opts.opts_size, sizeof(opts));

	/* opts_size is 0 */
	ret_ctrlr = spdk_nvme_connect(&trid, &opts, 0);
	CU_ASSERT(ret_ctrlr == &ctrlr);
	CU_ASSERT_EQUAL(ret_ctrlr->opts.opts_size, 0);

	/* opts_size is less than sizeof(*opts) if opts != NULL */
	ret_ctrlr = spdk_nvme_connect(&trid, &opts, 4);
	CU_ASSERT(ret_ctrlr == &ctrlr);
	CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, 1);
	CU_ASSERT_EQUAL(ret_ctrlr->opts.opts_size, 4);
	/* remove the attached ctrlr on the attached_list */
	MOCK_SET(nvme_ctrlr_get_ref_count, 1);
	CU_ASSERT(spdk_nvme_detach(&ctrlr) == 0);
	CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));

	/* driver init passes, transport available, primary process connects ctrlr */
	MOCK_SET(spdk_process_is_primary, true);
	/* setup one ctrlr on the attached_list */
	memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
	snprintf(ctrlr.trid.traddr, sizeof(ctrlr.trid.traddr), "0000:02:00.0");
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr.opts, sizeof(ctrlr.opts));
	TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &ctrlr, tailq);
	/* get the ctrlr from the attached list */
	snprintf(trid.traddr, sizeof(trid.traddr), "0000:02:00.0");
	ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
	CU_ASSERT(ret_ctrlr == &ctrlr);
	/* get the ctrlr from the attached list with default ctrlr opts */
	ctrlr.opts.num_io_queues = DEFAULT_MAX_IO_QUEUES;
	ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
	CU_ASSERT(ret_ctrlr == &ctrlr);
	CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
	/* get the ctrlr from the attached list with default ctrlr opts and consistent opts_size */
	opts.num_io_queues = 2;
	ret_ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
	CU_ASSERT(ret_ctrlr == &ctrlr);
	CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, 2);
	/* remove the attached ctrlr on the attached_list */
	CU_ASSERT(spdk_nvme_detach(ret_ctrlr) == 0);
	CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));

	/* test driver init failure return */
	MOCK_SET(spdk_process_is_primary, false);
	MOCK_SET(spdk_memzone_lookup, NULL);
	ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
	CU_ASSERT(ret_ctrlr == NULL);
}

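/* Allocate a minimal probe context with empty init and failed-controller lists. */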
static struct spdk_nvme_probe_ctx *
test_nvme_init_get_probe_ctx(void)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	SPDK_CU_ASSERT_FATAL(probe_ctx != NULL);
	TAILQ_INIT(&probe_ctx->init_ctrlrs);
	TAILQ_INIT(&probe_ctx->failed_ctxs.head);

	return probe_ctx;
}

static void
test_nvme_init_controllers(void)
{
	int rc = 0;
	struct nvme_driver test_driver = {};
	void *cb_ctx = NULL;
	spdk_nvme_attach_cb attach_cb = dummy_attach_cb;
	spdk_nvme_attach_fail_cb attach_fail_cb = dummy_attach_fail_cb;
	struct spdk_nvme_probe_ctx *probe_ctx;
	struct spdk_nvme_ctrlr *ctrlr;
	pthread_mutexattr_t attr;

	g_spdk_nvme_driver = &test_driver;
	ctrlr = calloc(1, sizeof(*ctrlr));
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
	ctrlr->trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
	CU_ASSERT(pthread_mutex_init(&ctrlr->ctrlr_lock, &attr) == 0);
	CU_ASSERT(pthread_mutex_init(&test_driver.lock, &attr) == 0);
	TAILQ_INIT(&test_driver.shared_attached_ctrlrs);

	/*
	 * Try to initialize, but nvme_ctrlr_process_init will fail.
	 * Verify correct behavior when it does.
	 */
	MOCK_SET(nvme_ctrlr_process_init, 1);
	MOCK_SET(spdk_process_is_primary, 1);
	g_spdk_nvme_driver->initialized = false;
	ut_destruct_called = false;
	ut_attach_fail_cb_rc = 0;
	probe_ctx = test_nvme_init_get_probe_ctx();
	TAILQ_INSERT_TAIL(&probe_ctx->init_ctrlrs, ctrlr, tailq);
	probe_ctx->cb_ctx = cb_ctx;
	probe_ctx->attach_cb = attach_cb;
	probe_ctx->attach_fail_cb = attach_fail_cb;
	probe_ctx->trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	rc = nvme_init_controllers(probe_ctx);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_spdk_nvme_driver->initialized == true);
	CU_ASSERT(ut_destruct_called == true);
	CU_ASSERT(ut_attach_fail_cb_rc == 1);

	/*
	 * Controller init OK, need to move the controller state machine
	 * forward by setting the ctrlr state so that it can be moved to
	 * the shared_attached_ctrlrs list.
	 */
	probe_ctx = test_nvme_init_get_probe_ctx();
	TAILQ_INSERT_TAIL(&probe_ctx->init_ctrlrs, ctrlr, tailq);
	ctrlr->state = NVME_CTRLR_STATE_READY;
	MOCK_SET(nvme_ctrlr_process_init, 0);
	rc = nvme_init_controllers(probe_ctx);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ut_attach_cb_called == true);
	CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs));
	CU_ASSERT(TAILQ_FIRST(&g_spdk_nvme_driver->shared_attached_ctrlrs) == ctrlr);
	TAILQ_REMOVE(&g_spdk_nvme_driver->shared_attached_ctrlrs, ctrlr, tailq);

	/*
	 * Reset to initial state
	 */
	CU_ASSERT(pthread_mutex_destroy(&ctrlr->ctrlr_lock) == 0);
	memset(ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
	CU_ASSERT(pthread_mutex_init(&ctrlr->ctrlr_lock, &attr) == 0);

	/*
	 * Non-PCIe controllers should be added to the per-process list, not the shared list.
	 */
	ctrlr->trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
	probe_ctx = test_nvme_init_get_probe_ctx();
	TAILQ_INSERT_TAIL(&probe_ctx->init_ctrlrs, ctrlr, tailq);
	ctrlr->state = NVME_CTRLR_STATE_READY;
	MOCK_SET(nvme_ctrlr_process_init, 0);
	rc = nvme_init_controllers(probe_ctx);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ut_attach_cb_called == true);
	CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
	CU_ASSERT(TAILQ_FIRST(&g_nvme_attached_ctrlrs) == ctrlr);
	TAILQ_REMOVE(&g_nvme_attached_ctrlrs, ctrlr, tailq);
	CU_ASSERT(pthread_mutex_destroy(&ctrlr->ctrlr_lock) == 0);
	free(ctrlr);
	CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs));

	g_spdk_nvme_driver = NULL;
	pthread_mutexattr_destroy(&attr);
	pthread_mutex_destroy(&test_driver.lock);
}

static void
test_nvme_driver_init(void)
{
	int rc;
	struct nvme_driver dummy = {};
	g_spdk_nvme_driver = &dummy;

	/* adjust this so testing doesn't take so long */
	g_nvme_driver_timeout_ms = 100;

	/* process is primary and mem already reserved */
	MOCK_SET(spdk_process_is_primary, true);
	dummy.initialized = true;
	rc = nvme_driver_init();
	CU_ASSERT(rc == 0);

	/*
	 * Process is primary and mem not yet reserved but the call
	 * to spdk_memzone_reserve() returns NULL.
	 */
	g_spdk_nvme_driver = NULL;
	MOCK_SET(spdk_process_is_primary, true);
	MOCK_SET(spdk_memzone_reserve, NULL);
	rc = nvme_driver_init();
	CU_ASSERT(rc == -1);

	/* process is not primary, no mem already reserved */
	MOCK_SET(spdk_process_is_primary, false);
	MOCK_SET(spdk_memzone_lookup, NULL);
	g_spdk_nvme_driver = NULL;
	rc = nvme_driver_init();
	CU_ASSERT(rc == -1);

	/* process is not primary, mem is already reserved & init'd */
	MOCK_SET(spdk_process_is_primary, false);
	MOCK_SET(spdk_memzone_lookup, (void *)&dummy);
	dummy.initialized = true;
	rc = nvme_driver_init();
	CU_ASSERT(rc == 0);

	/* process is not primary, mem is reserved but not initialized */
	/* and times out */
	MOCK_SET(spdk_process_is_primary, false);
	MOCK_SET(spdk_memzone_reserve, (void *)&dummy);
	dummy.initialized = false;
	rc = nvme_driver_init();
	CU_ASSERT(rc == -1);

	/* process is primary, got mem but mutex won't init */
	MOCK_SET(spdk_process_is_primary, true);
	MOCK_SET(spdk_memzone_reserve, (void *)&dummy);
	MOCK_SET(pthread_mutexattr_init, -1);
	g_spdk_nvme_driver = NULL;
	dummy.initialized = true;
	rc = nvme_driver_init();
	/* for FreeBSD we can't effectively mock this path */
#ifndef __FreeBSD__
	CU_ASSERT(rc != 0);
#else
	CU_ASSERT(rc == 0);
#endif

	/* process is primary, got mem, mutex OK */
	MOCK_SET(spdk_process_is_primary, true);
	MOCK_CLEAR(pthread_mutexattr_init);
	g_spdk_nvme_driver = NULL;
	rc = nvme_driver_init();
	CU_ASSERT(g_spdk_nvme_driver->initialized == false);
	CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
	CU_ASSERT(rc == 0);

	g_spdk_nvme_driver = NULL;
	MOCK_CLEAR(spdk_memzone_reserve);
	MOCK_CLEAR(spdk_memzone_lookup);
}

static void
test_spdk_nvme_detach(void)
{
	int rc = 1;
	struct spdk_nvme_ctrlr ctrlr;
	struct spdk_nvme_ctrlr *ret_ctrlr;
	struct nvme_driver test_driver = {};

	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;

	g_spdk_nvme_driver = &test_driver;
	TAILQ_INIT(&test_driver.shared_attached_ctrlrs);
	TAILQ_INSERT_TAIL(&test_driver.shared_attached_ctrlrs, &ctrlr, tailq);
	CU_ASSERT(pthread_mutex_init(&test_driver.lock, NULL) == 0);

	/*
	 * Controllers are ref counted, so mock the function that returns
	 * the ref count so that detach will actually call the destruct
	 * function, which we've mocked simply to verify that it gets
	 * called (we aren't testing what the real destruct function does
	 * here).
	 */
	MOCK_SET(nvme_ctrlr_get_ref_count, 1);
	rc = spdk_nvme_detach(&ctrlr);
	ret_ctrlr = TAILQ_FIRST(&test_driver.shared_attached_ctrlrs);
	CU_ASSERT(ret_ctrlr == NULL);
	CU_ASSERT(ut_destruct_called == true);
	CU_ASSERT(rc == 0);

	/*
	 * Mock the ref count to 2 so we can confirm that the destruct
	 * function is not called and that the attached ctrlr list is
	 * not empty.
	 */
	MOCK_SET(nvme_ctrlr_get_ref_count, 2);
	TAILQ_INSERT_TAIL(&test_driver.shared_attached_ctrlrs, &ctrlr, tailq);
	ut_destruct_called = false;
	rc = spdk_nvme_detach(&ctrlr);
	ret_ctrlr = TAILQ_FIRST(&test_driver.shared_attached_ctrlrs);
	CU_ASSERT(ret_ctrlr != NULL);
	CU_ASSERT(ut_destruct_called == false);
	CU_ASSERT(rc == 0);

	/*
	 * Non-PCIe controllers should be on the per-process attached_ctrlrs list, not the
	 * shared_attached_ctrlrs list.  Test an RDMA controller and ensure it is removed
	 * from the correct list.
	 */
	memset(&ctrlr, 0, sizeof(ctrlr));
	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
	TAILQ_INIT(&g_nvme_attached_ctrlrs);
	TAILQ_INSERT_TAIL(&g_nvme_attached_ctrlrs, &ctrlr, tailq);
	MOCK_SET(nvme_ctrlr_get_ref_count, 1);
	rc = spdk_nvme_detach(&ctrlr);
	CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs));
	CU_ASSERT(ut_destruct_called == true);
	CU_ASSERT(rc == 0);

	g_spdk_nvme_driver = NULL;
	pthread_mutex_destroy(&test_driver.lock);
}

static void
test_nvme_completion_poll_cb(void)
{
	struct nvme_completion_poll_status *status;
	struct spdk_nvme_cpl cpl;

	status = calloc(1, sizeof(*status));
	SPDK_CU_ASSERT_FATAL(status != NULL);

	memset(&cpl, 0xff, sizeof(cpl));

	nvme_completion_poll_cb(status, &cpl);
	CU_ASSERT(status->done == true);
	CU_ASSERT(memcmp(&cpl, &status->cpl,
			 sizeof(struct spdk_nvme_cpl)) == 0);

	free(status);
}

/* stub callback used by test_nvme_user_copy_cmd_complete() */
static struct spdk_nvme_cpl ut_spdk_nvme_cpl = {0};
static void
dummy_cb(void *user_cb_arg, const struct spdk_nvme_cpl *cpl)
{
	ut_spdk_nvme_cpl = *cpl;
}

static void
test_nvme_user_copy_cmd_complete(void)
{
	struct nvme_request req;
	int test_data = 0xdeadbeef;
	int buff_size = sizeof(int);
	void *user_buffer, *buff;
	int user_cb_arg = 0x123;
	static struct spdk_nvme_cpl cpl;

	memset(&req, 0, sizeof(req));
	memset(&cpl, 0x5a, sizeof(cpl));

	/* test without a user buffer provided */
	req.user_cb_fn = (void *)dummy_cb;
	req.user_cb_arg = (void *)&user_cb_arg;
	nvme_user_copy_cmd_complete(&req, &cpl);
	CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);
	CU_ASSERT(req.user_cb_fn == NULL);
	CU_ASSERT(req.user_cb_arg == NULL);
	CU_ASSERT(req.user_buffer == NULL);

	/* test with a user buffer provided */
	user_buffer = malloc(buff_size);
	req.user_cb_fn = (void *)dummy_cb;
	req.user_cb_arg = (void *)&user_cb_arg;
	req.user_buffer = user_buffer;
	SPDK_CU_ASSERT_FATAL(req.user_buffer != NULL);
	memset(req.user_buffer, 0, buff_size);
	req.payload_size = buff_size;
	buff = spdk_zmalloc(buff_size, 0x100, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	SPDK_CU_ASSERT_FATAL(buff != NULL);
	req.payload = NVME_PAYLOAD_CONTIG(buff, NULL);
	memcpy(buff, &test_data, buff_size);
	req.cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
	req.pid = getpid();

	/* zero out the test value set in the callback */
	memset(&ut_spdk_nvme_cpl, 0, sizeof(ut_spdk_nvme_cpl));

	nvme_user_copy_cmd_complete(&req, &cpl);
	CU_ASSERT(memcmp(user_buffer, &test_data, buff_size) == 0);
	CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);
	CU_ASSERT(req.user_cb_fn == NULL);
	CU_ASSERT(req.user_cb_arg == NULL);
	CU_ASSERT(req.user_buffer == NULL);

	/*
	 * Now test the same path as above but this time choose an opc
	 * that results in a different data transfer type.
	 */
	memset(&ut_spdk_nvme_cpl, 0, sizeof(ut_spdk_nvme_cpl));
	req.user_cb_fn = (void *)dummy_cb;
	req.user_cb_arg = (void *)&user_cb_arg;
	req.user_buffer = user_buffer;
	memset(req.user_buffer, 0, buff_size);
	buff = spdk_zmalloc(buff_size, 0x100, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	SPDK_CU_ASSERT_FATAL(buff != NULL);
	req.payload = NVME_PAYLOAD_CONTIG(buff, NULL);
	memcpy(buff, &test_data, buff_size);
	req.cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
	nvme_user_copy_cmd_complete(&req, &cpl);
	CU_ASSERT(memcmp(user_buffer, &test_data, buff_size) != 0);
	CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);
	CU_ASSERT(req.user_cb_fn == NULL);
	CU_ASSERT(req.user_cb_arg == NULL);
	CU_ASSERT(req.user_buffer == NULL);

	/* clean up */
	free(user_buffer);
}

static void
test_nvme_allocate_request_null(void)
{
	struct spdk_nvme_qpair qpair = {};
	spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x1234;
	void *cb_arg = (void *)0x5678;
	struct nvme_request *req = NULL;
	struct nvme_request dummy_req;

	STAILQ_INIT(&qpair.free_req);
	STAILQ_INIT(&qpair.queued_req);

	/*
	 * Put a dummy on the queue so we can make a request
	 * and confirm that what comes back is what we expect.
	 */
	STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);

	req = nvme_allocate_request_null(&qpair, cb_fn, cb_arg);

	/*
	 * Compare the req with the parameters that we passed in
	 * as well as what the function is supposed to update.
	 */
	SPDK_CU_ASSERT_FATAL(req != NULL);
	CU_ASSERT(req->cb_fn == cb_fn);
	CU_ASSERT(req->cb_arg == cb_arg);
	CU_ASSERT(req->pid == getpid());
	CU_ASSERT(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
	CU_ASSERT(req->payload.md == NULL);
	CU_ASSERT(req->payload.contig_or_cb_arg == NULL);
}

static void
test_nvme_allocate_request(void)
{
	struct spdk_nvme_qpair qpair;
	struct nvme_payload payload;
	uint32_t payload_struct_size = sizeof(payload);
	spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x1234;
	void *cb_arg = (void *)0x6789;
	struct nvme_request *req = NULL;
	struct nvme_request dummy_req;

	/* Fill the whole payload struct with a known pattern */
	memset(&payload, 0x5a, payload_struct_size);
	STAILQ_INIT(&qpair.free_req);
	STAILQ_INIT(&qpair.queued_req);
	qpair.num_outstanding_reqs = 0;

	/* Test trying to allocate a request when no requests are available */
	req = nvme_allocate_request(&qpair, &payload, payload_struct_size, 0,
				    cb_fn, cb_arg);
	CU_ASSERT(req == NULL);
	CU_ASSERT(qpair.num_outstanding_reqs == 0);

	/* put a dummy on the queue, and then allocate one */
	STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
	req = nvme_allocate_request(&qpair, &payload, payload_struct_size, 0,
				    cb_fn, cb_arg);

	/* all the req elements should now match the passed in parameters */
	SPDK_CU_ASSERT_FATAL(req != NULL);
	CU_ASSERT(qpair.num_outstanding_reqs == 1);
	CU_ASSERT(req->cb_fn == cb_fn);
	CU_ASSERT(req->cb_arg == cb_arg);
	CU_ASSERT(memcmp(&req->payload, &payload, payload_struct_size) == 0);
	CU_ASSERT(req->payload_size == payload_struct_size);
	CU_ASSERT(req->pid == getpid());
}

static void
test_nvme_free_request(void)
{
	struct nvme_request match_req;
	struct spdk_nvme_qpair qpair = {0};
	struct nvme_request *req;

	/* put a req on the Q, take it off and compare */
	memset(&match_req.cmd, 0x5a, sizeof(struct spdk_nvme_cmd));
	match_req.qpair = &qpair;
	qpair.num_outstanding_reqs = 1;
	/* the code under test asserts this condition */
	match_req.num_children = 0;
	STAILQ_INIT(&qpair.free_req);
	match_req.qpair->reserved_req = NULL;

	nvme_free_request(&match_req);
	req = STAILQ_FIRST(&match_req.qpair->free_req);
	CU_ASSERT(req == &match_req);
	CU_ASSERT(qpair.num_outstanding_reqs == 0);
}

static void
test_nvme_allocate_request_user_copy(void)
{
	struct spdk_nvme_qpair qpair;
	spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x12345;
	void *cb_arg = (void *)0x12345;
	bool host_to_controller = true;
	struct nvme_request *req;
	struct nvme_request dummy_req;
	int test_data = 0xdeadbeef;
	void *buffer = NULL;
	uint32_t payload_size = sizeof(int);

	STAILQ_INIT(&qpair.free_req);
	STAILQ_INIT(&qpair.queued_req);

	/* no buffer or valid payload size, early NULL return */
	req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
					      cb_arg, host_to_controller);
	CU_ASSERT(req == NULL);

	/* good buffer and valid payload size */
	buffer = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(buffer != NULL);
	memcpy(buffer, &test_data, payload_size);

	/* put a dummy on the queue */
	STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);

	MOCK_CLEAR(spdk_malloc);
	MOCK_CLEAR(spdk_zmalloc);
	req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
					      cb_arg, host_to_controller);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	CU_ASSERT(req->user_cb_fn == cb_fn);
	CU_ASSERT(req->user_cb_arg == cb_arg);
	CU_ASSERT(req->user_buffer == buffer);
	CU_ASSERT(req->cb_arg == req);
	CU_ASSERT(memcmp(req->payload.contig_or_cb_arg, buffer, payload_size) == 0);
	spdk_free(req->payload.contig_or_cb_arg);

	/* same thing but additional path coverage, no copy */
	host_to_controller = false;
	STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);

	req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
					      cb_arg, host_to_controller);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	CU_ASSERT(req->user_cb_fn == cb_fn);
	CU_ASSERT(req->user_cb_arg == cb_arg);
	CU_ASSERT(req->user_buffer == buffer);
	CU_ASSERT(req->cb_arg == req);
	CU_ASSERT(memcmp(req->payload.contig_or_cb_arg, buffer, payload_size) != 0);
	spdk_free(req->payload.contig_or_cb_arg);

	/* good buffer and valid payload size but make spdk_zmalloc fail */
	/* set the mock pointer to NULL for spdk_zmalloc */
	MOCK_SET(spdk_zmalloc, NULL);
	req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
					      cb_arg, host_to_controller);
	CU_ASSERT(req == NULL);
	free(buffer);
	MOCK_CLEAR(spdk_zmalloc);
}

static void
test_nvme_ctrlr_probe(void)
{
	int rc = 0;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair qpair = {};
	const struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_probe_ctx probe_ctx = {};
	void *devhandle = NULL;
	void *cb_ctx = NULL;
	struct spdk_nvme_ctrlr *dummy = NULL;

	nvme_driver_init();

	ctrlr.adminq = &qpair;
	nvme_get_default_hostnqn(ctrlr.opts.hostnqn, sizeof(ctrlr.opts.hostnqn));

	TAILQ_INIT(&probe_ctx.init_ctrlrs);
	TAILQ_INIT(&probe_ctx.failed_ctxs.head);

	/* test when probe_cb returns false */

	MOCK_SET(dummy_probe_cb, false);
	nvme_probe_ctx_init(&probe_ctx, &trid, NULL, cb_ctx, dummy_probe_cb, NULL, NULL, NULL);
	rc = nvme_ctrlr_probe(&trid, &probe_ctx, devhandle);
	CU_ASSERT(rc == 1);

	/* probe_cb returns true but we find a destructing ctrlr */
	MOCK_SET(dummy_probe_cb, true);
	ut_attach_fail_cb_rc = 0;
	ctrlr.is_destructed = true;
	TAILQ_INSERT_TAIL(&g_nvme_attached_ctrlrs, &ctrlr, tailq);
	nvme_probe_ctx_init(&probe_ctx, &trid, NULL, cb_ctx, dummy_probe_cb, NULL,
			    dummy_attach_fail_cb, NULL);
	rc = nvme_ctrlr_probe(&trid, &probe_ctx, devhandle);
	CU_ASSERT(rc == -EBUSY);
	CU_ASSERT(ut_attach_fail_cb_rc == -EBUSY);
	TAILQ_REMOVE(&g_nvme_attached_ctrlrs, &ctrlr, tailq);
	ctrlr.is_destructed = false;

	/* probe_cb returns true but we can't construct a ctrlr */
	MOCK_SET(dummy_probe_cb, true);
	MOCK_SET(nvme_transport_ctrlr_construct, NULL);
	ut_attach_fail_cb_rc = 0;
	nvme_probe_ctx_init(&probe_ctx, &trid, NULL, cb_ctx, dummy_probe_cb, NULL,
			    dummy_attach_fail_cb, NULL);
	rc = nvme_ctrlr_probe(&trid, &probe_ctx, devhandle);
	CU_ASSERT(rc == -1);
	CU_ASSERT(ut_attach_fail_cb_rc == -ENODEV);

	/* happy path */
	MOCK_SET(dummy_probe_cb, true);
	MOCK_SET(nvme_transport_ctrlr_construct, &ctrlr);
	nvme_probe_ctx_init(&probe_ctx, &trid, NULL, cb_ctx, dummy_probe_cb, NULL, NULL, NULL);
	rc = nvme_ctrlr_probe(&trid, &probe_ctx, devhandle);
	CU_ASSERT(rc == 0);
	dummy = TAILQ_FIRST(&probe_ctx.init_ctrlrs);
	SPDK_CU_ASSERT_FATAL(dummy != NULL);
	CU_ASSERT(dummy == ut_nvme_transport_ctrlr_construct);
	TAILQ_REMOVE(&probe_ctx.init_ctrlrs, dummy, tailq);
	MOCK_CLEAR_P(nvme_transport_ctrlr_construct);

	free(g_spdk_nvme_driver);
}

static void
test_nvme_robust_mutex_init_shared(void)
{
	pthread_mutex_t mtx;
	int rc = 0;

	/* test where both pthread calls succeed */
	MOCK_SET(pthread_mutexattr_init, 0);
	MOCK_SET(pthread_mutex_init, 0);
	rc = nvme_robust_mutex_init_shared(&mtx);
	CU_ASSERT(rc == 0);

	/* test where we can't init attr's but init mutex works */
	MOCK_SET(pthread_mutexattr_init, -1);
	MOCK_SET(pthread_mutex_init, 0);
	rc = nvme_robust_mutex_init_shared(&mtx);
	/* for FreeBSD the only possible return value is 0 */
#ifndef __FreeBSD__
	CU_ASSERT(rc != 0);
#else
	CU_ASSERT(rc == 0);
#endif

	/* test where we can init attr's but the mutex init fails */
	MOCK_SET(pthread_mutexattr_init, 0);
	MOCK_SET(pthread_mutex_init, -1);
	rc = nvme_robust_mutex_init_shared(&mtx);
	/* for FreeBSD the only possible return value is 0 */
#ifndef __FreeBSD__
	CU_ASSERT(rc != 0);
#else
	CU_ASSERT(rc == 0);
#endif
	MOCK_CLEAR(pthread_mutex_init);
}

static void
test_opc_data_transfer(void)
{
	enum spdk_nvme_data_transfer xfer;

	xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_FLUSH);
	CU_ASSERT(xfer == SPDK_NVME_DATA_NONE);

	xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_WRITE);
	CU_ASSERT(xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);

	xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_READ);
	CU_ASSERT(xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);

	xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_GET_LOG_PAGE);
	CU_ASSERT(xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
}

static void
test_trid_parse_and_compare(void)
{
	struct spdk_nvme_transport_id trid1, trid2;
	int ret;

	/* exercise spdk_nvme_transport_id_parse() with invalid and valid strings */
	ret = spdk_nvme_transport_id_parse(NULL, "trtype:PCIe traddr:0000:04:00.0");
	CU_ASSERT(ret == -EINVAL);
	memset(&trid1, 0, sizeof(trid1));
	ret = spdk_nvme_transport_id_parse(&trid1, NULL);
	CU_ASSERT(ret == -EINVAL);
	ret = spdk_nvme_transport_id_parse(NULL, NULL);
	CU_ASSERT(ret == -EINVAL);
	memset(&trid1, 0, sizeof(trid1));
	ret = spdk_nvme_transport_id_parse(&trid1, "trtype-PCIe traddr-0000-04-00.0");
	CU_ASSERT(ret == -EINVAL);
	memset(&trid1, 0, sizeof(trid1));
	ret = spdk_nvme_transport_id_parse(&trid1, "trtype-PCIe traddr-0000-04-00.0-:");
	CU_ASSERT(ret == -EINVAL);
	memset(&trid1, 0, sizeof(trid1));
	ret = spdk_nvme_transport_id_parse(&trid1, " \t\n:");
	CU_ASSERT(ret == -EINVAL);
	memset(&trid1, 0, sizeof(trid1));
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid1,
					       "trtype:rdma\n"
					       "adrfam:ipv4\n"
					       "traddr:192.168.100.8\n"
					       "trsvcid:4420\n"
					       "subnqn:nqn.2014-08.org.nvmexpress.discovery") == 0);
	CU_ASSERT(trid1.trtype == SPDK_NVME_TRANSPORT_RDMA);
	CU_ASSERT(trid1.adrfam == SPDK_NVMF_ADRFAM_IPV4);
	CU_ASSERT(strcmp(trid1.traddr, "192.168.100.8") == 0);
	CU_ASSERT(strcmp(trid1.trsvcid, "4420") == 0);
	CU_ASSERT(strcmp(trid1.subnqn, "nqn.2014-08.org.nvmexpress.discovery") == 0);

	memset(&trid2, 0, sizeof(trid2));
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:0000:04:00.0") == 0);
	CU_ASSERT(trid2.trtype == SPDK_NVME_TRANSPORT_PCIE);
	CU_ASSERT(strcmp(trid2.traddr, "0000:04:00.0") == 0);

	CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) != 0);

	/* set trid1 and trid2, then test spdk_nvme_transport_id_compare() */
	memset_trid(&trid1, &trid2);
	trid1.adrfam = SPDK_NVMF_ADRFAM_IPV6;
	trid2.adrfam = SPDK_NVMF_ADRFAM_IPV4;
	ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
	CU_ASSERT(ret > 0);

	memset_trid(&trid1, &trid2);
	snprintf(trid1.traddr, sizeof(trid1.traddr), "192.168.100.8");
	snprintf(trid2.traddr, sizeof(trid2.traddr), "192.168.100.9");
	ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
	CU_ASSERT(ret < 0);

	memset_trid(&trid1, &trid2);
	snprintf(trid1.trsvcid, sizeof(trid1.trsvcid), "4420");
	snprintf(trid2.trsvcid, sizeof(trid2.trsvcid), "4421");
	ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
	CU_ASSERT(ret < 0);

	memset_trid(&trid1, &trid2);
	snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
	snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2017-08.org.nvmexpress.discovery");
	ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
	CU_ASSERT(ret < 0);

	memset_trid(&trid1, &trid2);
	snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
	snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
	ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
	CU_ASSERT(ret == 0);

	memset_trid(&trid1, &trid2);
	snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
	snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2016-08.org.Nvmexpress.discovery");
	ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
	CU_ASSERT(ret > 0);

	memset_trid(&trid1, &trid2);
	ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
	CU_ASSERT(ret == 0);

	/* Compare PCI addresses via spdk_pci_addr_compare (rather than as strings) */
	memset_trid(&trid1, &trid2);
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:04:00.0") == 0);
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:04:00.0") == 0);
	CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) == 0);

	memset_trid(&trid1, &trid2);
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:05:00.0") == 0);
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:04:00.0") == 0);
	CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) > 0);

	memset_trid(&trid1, &trid2);
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:04:00.0") == 0);
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:05:00.0") == 0);
	CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) < 0);

	memset_trid(&trid1, &trid2);
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype=PCIe traddr=0000:04:00.0") == 0);
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype=PCIe traddr=05:00.0") == 0);
	CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) < 0);

	CU_ASSERT(spdk_nvme_transport_id_parse(&trid1,
					       "trtype:tcp\n"
					       "adrfam:ipv4\n"
					       "traddr:192.168.100.8\n"
					       "trsvcid:4420\n"
					       "priority:2\n"
					       "subnqn:nqn.2014-08.org.nvmexpress.discovery") == 0);
	CU_ASSERT(trid1.priority == 2);
}

static void
test_spdk_nvme_transport_id_parse_trtype(void)
{

	enum spdk_nvme_transport_type *trtype;
	enum spdk_nvme_transport_type sct;
	char *str;

	trtype = NULL;
	str = "unit_test";

	/* test function returned value when trtype is NULL but str not NULL */
	CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == (-EINVAL));

	/* test function returned value when str is NULL but trtype not NULL */
	trtype = &sct;
	str = NULL;
	CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == (-EINVAL));

	/* test function returned value when str and trtype not NULL, but str value
	 * not "PCIe" or "RDMA" */
	str = "unit_test";
	CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == 0);
	CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_CUSTOM);

	/* test trtype value when use function "strcasecmp" to compare str and "PCIe", not case-sensitive */
	str = "PCIe";
	spdk_nvme_transport_id_parse_trtype(trtype, str);
	CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_PCIE);

	str = "pciE";
	spdk_nvme_transport_id_parse_trtype(trtype, str);
	CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_PCIE);

	/* test trtype value when use function "strcasecmp" to compare str and "RDMA", not case-sensitive */
	str = "RDMA";
	spdk_nvme_transport_id_parse_trtype(trtype, str);
	CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_RDMA);

	str = "rdma";
	spdk_nvme_transport_id_parse_trtype(trtype, str);
	CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_RDMA);

	/* test trtype value when use function "strcasecmp" to compare str and "FC", not case-sensitive */
	str = "FC";
	spdk_nvme_transport_id_parse_trtype(trtype, str);
	CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_FC);

	str = "fc";
	spdk_nvme_transport_id_parse_trtype(trtype, str);
	CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_FC);

	/* test trtype value when use function "strcasecmp" to compare str and "TCP", not case-sensitive */
	str = "TCP";
	spdk_nvme_transport_id_parse_trtype(trtype, str);
	CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_TCP);

	str = "tcp";
	spdk_nvme_transport_id_parse_trtype(trtype, str);
	CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_TCP);
}

static void
test_spdk_nvme_transport_id_parse_adrfam(void)
{

	enum spdk_nvmf_adrfam *adrfam;
	enum spdk_nvmf_adrfam sct;
	char *str;

	adrfam = NULL;
	str = "unit_test";

	/* test function returned value when adrfam is NULL but str not NULL */
	CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-EINVAL));

	/* test function returned value when str is NULL but adrfam not NULL */
	adrfam = &sct;
	str = NULL;
	CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-EINVAL));

	/* test function returned value when str and adrfam not NULL, but str value
	 * not "IPv4" or "IPv6" or "IB" or "FC" */
	str = "unit_test";
	CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-ENOENT));

	/* test adrfam value when use function "strcasecmp" to compare str and "IPv4", not case-sensitive */
	str = "IPv4";
	spdk_nvme_transport_id_parse_adrfam(adrfam, str);
	CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV4);

	str = "ipV4";
	spdk_nvme_transport_id_parse_adrfam(adrfam, str);
	CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV4);

	/* test adrfam value when use function "strcasecmp" to compare str and "IPv6", not case-sensitive */
	str = "IPv6";
	spdk_nvme_transport_id_parse_adrfam(adrfam, str);
	CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV6);

	str = "ipV6";
	spdk_nvme_transport_id_parse_adrfam(adrfam, str);
	CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV6);

	/* test adrfam value when use function "strcasecmp" to compare str and "IB", not case-sensitive */
	str = "IB";
	spdk_nvme_transport_id_parse_adrfam(adrfam, str);
	CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IB);

	str = "ib";
	spdk_nvme_transport_id_parse_adrfam(adrfam, str);
	CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IB);

	/* test adrfam value when use function "strcasecmp" to compare str and "FC", not case-sensitive */
	str = "FC";
	spdk_nvme_transport_id_parse_adrfam(adrfam, str);
	CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_FC);

	str = "fc";
	spdk_nvme_transport_id_parse_adrfam(adrfam, str);
	CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_FC);

}

static void
test_trid_trtype_str(void)
{
	const char *s;

	s = spdk_nvme_transport_id_trtype_str(-5);
	CU_ASSERT(s == NULL);

	s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_PCIE);
	SPDK_CU_ASSERT_FATAL(s != NULL);
	CU_ASSERT(strcmp(s, "PCIe") == 0);

	s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_RDMA);
	SPDK_CU_ASSERT_FATAL(s != NULL);
	CU_ASSERT(strcmp(s, "RDMA") == 0);

	s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_FC);
	SPDK_CU_ASSERT_FATAL(s != NULL);
	CU_ASSERT(strcmp(s, "FC") == 0);

	s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_TCP);
	SPDK_CU_ASSERT_FATAL(s != NULL);
	CU_ASSERT(strcmp(s, "TCP") == 0);
}

static void
test_trid_adrfam_str(void)
{
	const char *s;

	s = spdk_nvme_transport_id_adrfam_str(-5);
	CU_ASSERT(s == NULL);

	s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IPV4);
	SPDK_CU_ASSERT_FATAL(s != NULL);
	CU_ASSERT(strcmp(s, "IPv4") == 0);

	s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IPV6);
	SPDK_CU_ASSERT_FATAL(s != NULL);
	CU_ASSERT(strcmp(s, "IPv6") == 0);

	s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IB);
	SPDK_CU_ASSERT_FATAL(s != NULL);
	CU_ASSERT(strcmp(s, "IB") == 0);

	s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_FC);
	SPDK_CU_ASSERT_FATAL(s != NULL);
	CU_ASSERT(strcmp(s, "FC") == 0);
}

/* stub callback used by the test_nvme_request_check_timeout */
static bool ut_timeout_cb_call = false;
static void
dummy_timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr,
		 struct spdk_nvme_qpair *qpair, uint16_t cid)
{
	ut_timeout_cb_call = true;
}

static void
test_nvme_request_check_timeout(void)
{
	int rc;
	struct spdk_nvme_qpair qpair;
	struct nvme_request req;
	struct spdk_nvme_ctrlr_process active_proc;
	uint16_t cid = 0;
	uint64_t now_tick = 0;

	memset(&qpair, 0x0, sizeof(qpair));
	memset(&req, 0x0, sizeof(req));
	memset(&active_proc, 0x0, sizeof(active_proc));
	req.qpair = &qpair;
	active_proc.timeout_cb_fn = dummy_timeout_cb;

	/* if timeout_cb_fn has already been called, return directly */
	req.timed_out = true;
	rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ut_timeout_cb_call == false);

	/* if timeout isn't enabled then return directly */
	req.timed_out = false;
	req.submit_tick = 0;
	rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ut_timeout_cb_call == false);

	/* if req->pid doesn't match, return directly */
	req.submit_tick = 1;
	req.pid = g_spdk_nvme_pid + 1;
	rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ut_timeout_cb_call == false);

	/* AER command has no timeout */
	req.pid = g_spdk_nvme_pid;
	req.cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
	rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ut_timeout_cb_call == false);

	/* not timed out yet */
	qpair.id = 1;
	rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
	CU_ASSERT(rc == 1);
	CU_ASSERT(ut_timeout_cb_call == false);

	now_tick = 2;
	rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
	CU_ASSERT(req.timed_out == true);
	CU_ASSERT(ut_timeout_cb_call == true);
	CU_ASSERT(rc == 0);
}

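/*
 * State shared with the overridden spdk_nvme_qpair_process_completions()
 * below, which simulates a completion that takes completion_delay_us and
 * succeeds only if it finishes within timeout_in_usecs.
 */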
1309 struct nvme_completion_poll_status g_status;
1310 uint64_t completion_delay_us, timeout_in_usecs;
1311 int g_process_comp_result;
1312 pthread_mutex_t g_robust_lock = PTHREAD_MUTEX_INITIALIZER;
1313 
1314 int
1315 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
1316 {
1317 	spdk_delay_us(completion_delay_us);
1318 
1319 	g_status.done = completion_delay_us < timeout_in_usecs && g_process_comp_result == 0 ? true : false;
1320 
1321 	return g_process_comp_result;
1322 }
1323 
1324 static void
1325 test_nvme_wait_for_completion(void)
1326 {
1327 	struct spdk_nvme_qpair qpair;
1328 	struct spdk_nvme_ctrlr ctrlr;
1329 	int rc = 0;
1330 
1331 	memset(&ctrlr, 0, sizeof(ctrlr));
1332 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
1333 	memset(&qpair, 0, sizeof(qpair));
1334 	qpair.ctrlr = &ctrlr;
1335 
1336 	/* completion timeout */
1337 	memset(&g_status, 0, sizeof(g_status));
1338 	completion_delay_us = 2000000;
1339 	timeout_in_usecs = 1000000;
1340 	rc = nvme_wait_for_completion_timeout(&qpair, &g_status, timeout_in_usecs);
1341 	CU_ASSERT(g_status.timed_out == true);
1342 	CU_ASSERT(g_status.done == false);
1343 	CU_ASSERT(rc == -ECANCELED);
1344 
1345 	/* spdk_nvme_qpair_process_completions returns error */
1346 	memset(&g_status, 0, sizeof(g_status));
1347 	g_process_comp_result = -1;
1348 	completion_delay_us = 1000000;
1349 	timeout_in_usecs = 2000000;
1350 	rc = nvme_wait_for_completion_timeout(&qpair, &g_status, timeout_in_usecs);
1351 	CU_ASSERT(rc == -ECANCELED);
1352 	CU_ASSERT(g_status.timed_out == true);
1353 	CU_ASSERT(g_status.done == false);
1354 	CU_ASSERT(g_status.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1355 	CU_ASSERT(g_status.cpl.status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION);
1356 
1357 	g_process_comp_result = 0;
1358 
1359 	/* complete in time */
1360 	memset(&g_status, 0, sizeof(g_status));
1361 	completion_delay_us = 1000000;
1362 	timeout_in_usecs = 2000000;
1363 	rc = nvme_wait_for_completion_timeout(&qpair, &g_status, timeout_in_usecs);
1364 	CU_ASSERT(g_status.timed_out == false);
1365 	CU_ASSERT(g_status.done == true);
1366 	CU_ASSERT(rc == 0);
1367 
1368 	/* nvme_wait_for_completion */
1369 	/* spdk_nvme_qpair_process_completions returns error */
1370 	memset(&g_status, 0, sizeof(g_status));
1371 	g_process_comp_result = -1;
1372 	rc = nvme_wait_for_completion(&qpair, &g_status);
1373 	CU_ASSERT(rc == -ECANCELED);
1374 	CU_ASSERT(g_status.timed_out == true);
1375 	CU_ASSERT(g_status.done == false);
1376 	CU_ASSERT(g_status.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1377 	CU_ASSERT(g_status.cpl.status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION);
1378 
1379 	/* successful completion */
1380 	memset(&g_status, 0, sizeof(g_status));
1381 	g_process_comp_result = 0;
1382 	rc = nvme_wait_for_completion(&qpair, &g_status);
1383 	CU_ASSERT(rc == 0);
1384 	CU_ASSERT(g_status.timed_out == false);
1385 	CU_ASSERT(g_status.done == true);
1386 
1387 	/* completion  timeout */
1388 	memset(&g_status, 0, sizeof(g_status));
1389 	completion_delay_us = 2000000;
1390 	timeout_in_usecs = 1000000;
1391 	rc = nvme_wait_for_completion_robust_lock_timeout(&qpair, &g_status, &g_robust_lock,
1392 			timeout_in_usecs);
1393 	CU_ASSERT(g_status.timed_out == true);
1394 	CU_ASSERT(g_status.done == false);
1395 	CU_ASSERT(rc == -ECANCELED);
1396 
1397 	/* spdk_nvme_qpair_process_completions returns error */
1398 	memset(&g_status, 0, sizeof(g_status));
1399 	g_process_comp_result = -1;
1400 	completion_delay_us = 1000000;
1401 	timeout_in_usecs = 2000000;
1402 	rc = nvme_wait_for_completion_robust_lock_timeout(&qpair, &g_status, &g_robust_lock,
1403 			timeout_in_usecs);
1404 	CU_ASSERT(rc == -ECANCELED);
1405 	CU_ASSERT(g_status.timed_out == true);
1406 	CU_ASSERT(g_status.done == false);
1407 	CU_ASSERT(g_status.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1408 	CU_ASSERT(g_status.cpl.status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION);
1409 
1410 	g_process_comp_result = 0;
1411 
1412 	/* complete in time */
1413 	memset(&g_status, 0, sizeof(g_status));
1414 	completion_delay_us = 1000000;
1415 	timeout_in_usecs = 2000000;
1416 	rc = nvme_wait_for_completion_robust_lock_timeout(&qpair, &g_status, &g_robust_lock,
1417 			timeout_in_usecs);
1418 	CU_ASSERT(g_status.timed_out == false);
1419 	CU_ASSERT(g_status.done == true);
1420 	CU_ASSERT(rc == 0);
1421 
1422 	/* nvme_wait_for_completion */
1423 	/* spdk_nvme_qpair_process_completions returns error */
1424 	memset(&g_status, 0, sizeof(g_status));
1425 	g_process_comp_result = -1;
1426 	rc = nvme_wait_for_completion_robust_lock(&qpair, &g_status, &g_robust_lock);
1427 	CU_ASSERT(rc == -ECANCELED);
1428 	CU_ASSERT(g_status.timed_out == true);
1429 	CU_ASSERT(g_status.done == false);
1430 	CU_ASSERT(g_status.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1431 	CU_ASSERT(g_status.cpl.status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION);
1432 
1433 	/* successful completion */
1434 	memset(&g_status, 0, sizeof(g_status));
1435 	g_process_comp_result = 0;
1436 	rc = nvme_wait_for_completion_robust_lock(&qpair, &g_status, &g_robust_lock);
1437 	CU_ASSERT(rc == 0);
1438 	CU_ASSERT(g_status.timed_out == false);
1439 	CU_ASSERT(g_status.done == true);
1440 }
1441 
1442 static void
1443 test_nvme_ctrlr_probe_internal(void)
1444 {
1445 	struct spdk_nvme_probe_ctx *probe_ctx;
1446 	struct spdk_nvme_transport_id trid = {};
1447 	struct nvme_driver dummy = {};
1448 	int rc;
1449 
1450 	probe_ctx = calloc(1, sizeof(*probe_ctx));
1451 	CU_ASSERT(probe_ctx != NULL);
1452 
1453 	MOCK_SET(spdk_process_is_primary, true);
1454 	MOCK_SET(spdk_memzone_reserve, (void *)&dummy);
1455 	g_spdk_nvme_driver = NULL;
1456 	rc = nvme_driver_init();
1457 	CU_ASSERT(rc == 0);
1458 
1459 	ut_attach_fail_cb_rc = 0;
1460 	ut_test_probe_internal = true;
1461 	MOCK_SET(dummy_probe_cb, true);
1462 	trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
1463 	nvme_probe_ctx_init(probe_ctx, &trid, NULL, NULL, dummy_probe_cb, NULL, dummy_attach_fail_cb,
1464 			    NULL);
1465 	rc = nvme_probe_internal(probe_ctx, false);
1466 	CU_ASSERT(rc < 0);
1467 	CU_ASSERT(TAILQ_EMPTY(&probe_ctx->init_ctrlrs));
1468 	CU_ASSERT(TAILQ_EMPTY(&probe_ctx->failed_ctxs.head));
1469 	CU_ASSERT(ut_attach_fail_cb_rc == -EFAULT);
1470 
1471 	free(probe_ctx);
1472 	ut_test_probe_internal = false;
1473 }
1474 
1475 static void
1476 test_spdk_nvme_parse_func(void)
1477 {
1478 	struct spdk_nvme_host_id hostid = {};
1479 	char str[64] = {};
1480 	const char *rt_str = NULL;
1481 	uint32_t prchk_flags;
1482 	int rc;
1483 
1484 	/* Parse prchk flags. */
1485 	prchk_flags = 0;
1486 	rt_str = spdk_nvme_prchk_flags_str(SPDK_NVME_IO_FLAGS_PRCHK_REFTAG);
1487 	memcpy(str, rt_str, strlen(rt_str));
1488 
1489 	rc = spdk_nvme_prchk_flags_parse(&prchk_flags, str);
1490 	CU_ASSERT(rc == 0);
1491 	CU_ASSERT(prchk_flags & SPDK_NVME_IO_FLAGS_PRCHK_REFTAG);
1492 
1493 	prchk_flags = 0;
1494 	rt_str = spdk_nvme_prchk_flags_str(SPDK_NVME_IO_FLAGS_PRCHK_GUARD);
1495 	memcpy(str, rt_str, strlen(rt_str));
1496 
1497 	rc = spdk_nvme_prchk_flags_parse(&prchk_flags, str);
1498 	CU_ASSERT(prchk_flags & SPDK_NVME_IO_FLAGS_PRCHK_GUARD);
1499 	CU_ASSERT(rc == 0);
1500 
1501 	prchk_flags = 0;
1502 	rt_str = spdk_nvme_prchk_flags_str(SPDK_NVME_IO_FLAGS_PRCHK_REFTAG |
1503 					   SPDK_NVME_IO_FLAGS_PRCHK_GUARD);
1504 	memcpy(str, rt_str, strlen(rt_str));
1505 
1506 	rc = spdk_nvme_prchk_flags_parse(&prchk_flags, str);
1507 	CU_ASSERT(rc == 0);
1508 	CU_ASSERT(prchk_flags & SPDK_NVME_IO_FLAGS_PRCHK_REFTAG);
1509 	CU_ASSERT(prchk_flags & SPDK_NVME_IO_FLAGS_PRCHK_GUARD);
1510 
1511 	rc = spdk_nvme_prchk_flags_parse(NULL, NULL);
1512 	CU_ASSERT(rc == -EINVAL);
1513 
1514 	/* Parse host id. */
1515 	memcpy(str, "hostaddr:192.168.1.1", sizeof("hostaddr:192.168.1.1"));
1516 	rc = spdk_nvme_host_id_parse(&hostid, str);
1517 	CU_ASSERT(rc == 0);
1518 	CU_ASSERT(!strncmp(hostid.hostaddr, "192.168.1.1", sizeof("192.168.1.1")));
1519 
1520 	memset(&hostid, 0, sizeof(hostid));
1521 	memcpy(str, "hostsvcid:192.168.1.2", sizeof("hostsvcid:192.168.1.2"));
1522 	rc = spdk_nvme_host_id_parse(&hostid, str);
1523 	CU_ASSERT(rc == 0);
1524 	CU_ASSERT(!strncmp(hostid.hostsvcid, "192.168.1.2", sizeof("192.168.1.2")));
1525 
1526 	/* Unknown transport ID key */
1527 	memset(&hostid, 0, sizeof(hostid));
1528 	memcpy(str, "trtype:xxx", sizeof("trtype:xxx"));
1529 	rc = spdk_nvme_host_id_parse(&hostid, str);
1530 	CU_ASSERT(rc == 0);
1531 	CU_ASSERT(hostid.hostaddr[0] == '\0' && hostid.hostsvcid[0] == '\0');
1532 }
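
/* Illustrative sketch only (not executed by the suite), assuming the same
 * str()/parse() pair exercised above; a caller would typically round-trip
 * protection-check flags like this:
 *
 *   uint32_t flags = 0;
 *   const char *s = spdk_nvme_prchk_flags_str(SPDK_NVME_IO_FLAGS_PRCHK_REFTAG);
 *   if (s != NULL && spdk_nvme_prchk_flags_parse(&flags, s) == 0) {
 *           assert(flags & SPDK_NVME_IO_FLAGS_PRCHK_REFTAG);
 *   }
 */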
1533 
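/* Walk spdk_nvme_detach_async()/spdk_nvme_detach_poll_async() through three
 * scenarios: two PCIe controllers on the shared attached list, a mix of
 * non-PCIe and PCIe controllers, and a second controller added to a detach
 * context that is already being polled.
 */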
1534 static void
1535 test_spdk_nvme_detach_async(void)
1536 {
1537 	int rc = 1;
1538 	struct spdk_nvme_ctrlr ctrlr1, ctrlr2;
1539 	struct nvme_driver test_driver = {};
1540 	struct spdk_nvme_detach_ctx *detach_ctx;
1541 	struct nvme_ctrlr_detach_ctx *ctx;
1542 
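	/* Case 1: both controllers are PCIe and sit on the driver's shared
	 * attached list; detaching both via a single context should empty it.
	 */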
1543 	detach_ctx = NULL;
1544 	memset(&ctrlr1, 0, sizeof(ctrlr1));
1545 	ctrlr1.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
1546 	memset(&ctrlr2, 0, sizeof(ctrlr2));
1547 	ctrlr2.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
1548 
1549 	g_spdk_nvme_driver = &test_driver;
1550 	TAILQ_INIT(&test_driver.shared_attached_ctrlrs);
1551 	TAILQ_INSERT_TAIL(&test_driver.shared_attached_ctrlrs, &ctrlr1, tailq);
1552 	TAILQ_INSERT_TAIL(&test_driver.shared_attached_ctrlrs, &ctrlr2, tailq);
1553 	CU_ASSERT(pthread_mutex_init(&test_driver.lock, NULL) == 0);
1554 	MOCK_SET(nvme_ctrlr_get_ref_count, 1);
1555 
1556 	rc = spdk_nvme_detach_async(&ctrlr1, &detach_ctx);
1557 	CU_ASSERT(rc == 0);
1558 	CU_ASSERT(ctrlr1.is_destructed == true);
1559 	CU_ASSERT(detach_ctx != NULL);
1560 
1561 	rc = spdk_nvme_detach_async(&ctrlr2, &detach_ctx);
1562 	CU_ASSERT(rc == 0);
1563 	CU_ASSERT(ctrlr2.is_destructed == true);
1564 	CU_ASSERT(detach_ctx != NULL);
1565 
1566 	CU_ASSERT(TAILQ_EMPTY(&test_driver.shared_attached_ctrlrs) == false);
1567 
1568 	rc = spdk_nvme_detach_poll_async(detach_ctx);
1569 	CU_ASSERT(rc == 0);
1570 	CU_ASSERT(TAILQ_EMPTY(&test_driver.shared_attached_ctrlrs) == true);
1571 
1572 	/* ctrlr1 is a non-PCIe (RDMA) controller and ctrlr2 is a PCIe controller.
1573 	 * Even in this case, detachment should complete successfully.
1574 	 */
1575 	detach_ctx = NULL;
1576 	memset(&ctrlr1, 0, sizeof(ctrlr1));
1577 	ctrlr1.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
1578 	memset(&ctrlr2, 0, sizeof(ctrlr2));
1579 	ctrlr2.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
1580 	TAILQ_INIT(&g_nvme_attached_ctrlrs);
1581 	TAILQ_INSERT_TAIL(&g_nvme_attached_ctrlrs, &ctrlr1, tailq);
1582 	TAILQ_INSERT_TAIL(&test_driver.shared_attached_ctrlrs, &ctrlr2, tailq);
1583 
1584 	rc = spdk_nvme_detach_async(&ctrlr1, &detach_ctx);
1585 	CU_ASSERT(rc == 0);
1586 	CU_ASSERT(ctrlr1.is_destructed == true);
1587 	CU_ASSERT(detach_ctx != NULL);
1588 
1589 	rc = spdk_nvme_detach_async(&ctrlr2, &detach_ctx);
1590 	CU_ASSERT(rc == 0);
1591 	CU_ASSERT(ctrlr2.is_destructed == true);
1592 	CU_ASSERT(detach_ctx != NULL);
1593 
1594 	CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs) == false);
1595 	CU_ASSERT(TAILQ_EMPTY(&test_driver.shared_attached_ctrlrs) == false);
1596 
1597 	rc = spdk_nvme_detach_poll_async(detach_ctx);
1598 	CU_ASSERT(rc == 0);
1599 	CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs) == true);
1600 	CU_ASSERT(TAILQ_EMPTY(&test_driver.shared_attached_ctrlrs) == true);
1601 
1602 	/* Test if ctrlr2 can be detached by using the same context that
1603 	 * ctrlr1 uses while ctrlr1 is being detached.
1604 	 */
1605 	detach_ctx = NULL;
1606 	memset(&ctrlr1, 0, sizeof(ctrlr1));
1607 	ctrlr1.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
1608 	memset(&ctrlr2, 0, sizeof(ctrlr2));
1609 	ctrlr2.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
1610 	TAILQ_INSERT_TAIL(&test_driver.shared_attached_ctrlrs, &ctrlr1, tailq);
1611 	TAILQ_INSERT_TAIL(&test_driver.shared_attached_ctrlrs, &ctrlr2, tailq);
1612 
1613 	rc = spdk_nvme_detach_async(&ctrlr1, &detach_ctx);
1614 	CU_ASSERT(rc == 0);
1615 	CU_ASSERT(ctrlr1.is_destructed == true);
1616 	SPDK_CU_ASSERT_FATAL(detach_ctx != NULL);
1617 
1618 	ctx = TAILQ_FIRST(&detach_ctx->head);
1619 	SPDK_CU_ASSERT_FATAL(ctx != NULL);
1620 	CU_ASSERT(ctx->ctrlr == &ctrlr1);
1621 	CU_ASSERT(ctx->shutdown_complete == true);
1622 
1623 	/* Set ctx->shutdown_complete for ctrlr1 to false so that ctrlr2 can be
1624 	 * added to detach_ctx while spdk_nvme_detach_poll_async() is still in
1625 	 * progress.
1626 	 */
1627 	ctx->shutdown_complete = false;
1628 
1629 	rc = spdk_nvme_detach_poll_async(detach_ctx);
1630 	CU_ASSERT(rc == -EAGAIN);
1631 
1632 	rc = spdk_nvme_detach_async(&ctrlr2, &detach_ctx);
1633 	CU_ASSERT(rc == 0);
1634 	CU_ASSERT(ctrlr2.is_destructed == true);
1635 
1636 	/* After ctrlr2 is added to detach_ctx, set ctx->shutdown_complete for
1637 	 * ctrlr1 to true to complete spdk_nvme_detach_poll_async().
1638 	 */
1639 	ctx->shutdown_complete = true;
1640 
1641 	rc = spdk_nvme_detach_poll_async(detach_ctx);
1642 	CU_ASSERT(rc == 0);
1643 	CU_ASSERT(TAILQ_EMPTY(&test_driver.shared_attached_ctrlrs) == true);
1644 
1645 	g_spdk_nvme_driver = NULL;
1646 	pthread_mutex_destroy(&test_driver.lock);
1647 	MOCK_CLEAR(nvme_ctrlr_get_ref_count);
1648 }
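
/* Illustrative sketch only (not executed by the suite): a typical caller
 * drives the detach context to completion by polling, e.g.
 *
 *   struct spdk_nvme_detach_ctx *dctx = NULL;
 *   if (spdk_nvme_detach_async(ctrlr, &dctx) == 0) {
 *           while (spdk_nvme_detach_poll_async(dctx) == -EAGAIN) {
 *                   ;
 *           }
 *   }
 */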
1649 
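/* Check nvme_parse_addr() error handling for NULL inputs and successful
 * resolution of a numeric IPv4 address and port.
 */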
1650 static void
1651 test_nvme_parse_addr(void)
1652 {
1653 	struct sockaddr_storage dst_addr;
1654 	int rc = 0;
1655 	long int port;
1656 
1657 	memset(&dst_addr, 0, sizeof(dst_addr));
1658 	/* case1: NULL address and service strings, getaddrinfo() fails. Expect: Fail. */
1659 	rc = nvme_parse_addr(&dst_addr, AF_INET, NULL, NULL, &port);
1660 	CU_ASSERT(rc != 0);
1661 
1662 	/* case2: valid IPv4 address and port; res->ai_addrlen fits within sizeof(*sa). Expect: Pass. */
1663 	rc = nvme_parse_addr(&dst_addr, AF_INET, "12.34.56.78", "23", &port);
1664 	CU_ASSERT(rc == 0);
1665 	CU_ASSERT(port == 23);
1666 	CU_ASSERT(dst_addr.ss_family == AF_INET);
1667 }
1668 
1669 int
1670 main(int argc, char **argv)
1671 {
1672 	CU_pSuite	suite = NULL;
1673 	unsigned int	num_failures;
1674 
1675 	CU_initialize_registry();
1676 
1677 	suite = CU_add_suite("nvme", NULL, NULL);
1678 
1679 	CU_ADD_TEST(suite, test_opc_data_transfer);
1680 	CU_ADD_TEST(suite, test_spdk_nvme_transport_id_parse_trtype);
1681 	CU_ADD_TEST(suite, test_spdk_nvme_transport_id_parse_adrfam);
1682 	CU_ADD_TEST(suite, test_trid_parse_and_compare);
1683 	CU_ADD_TEST(suite, test_trid_trtype_str);
1684 	CU_ADD_TEST(suite, test_trid_adrfam_str);
1685 	CU_ADD_TEST(suite, test_nvme_ctrlr_probe);
1686 	CU_ADD_TEST(suite, test_spdk_nvme_probe_ext);
1687 	CU_ADD_TEST(suite, test_spdk_nvme_connect);
1688 	CU_ADD_TEST(suite, test_nvme_ctrlr_probe_internal);
1689 	CU_ADD_TEST(suite, test_nvme_init_controllers);
1690 	CU_ADD_TEST(suite, test_nvme_driver_init);
1691 	CU_ADD_TEST(suite, test_spdk_nvme_detach);
1692 	CU_ADD_TEST(suite, test_nvme_completion_poll_cb);
1693 	CU_ADD_TEST(suite, test_nvme_user_copy_cmd_complete);
1694 	CU_ADD_TEST(suite, test_nvme_allocate_request_null);
1695 	CU_ADD_TEST(suite, test_nvme_allocate_request);
1696 	CU_ADD_TEST(suite, test_nvme_free_request);
1697 	CU_ADD_TEST(suite, test_nvme_allocate_request_user_copy);
1698 	CU_ADD_TEST(suite, test_nvme_robust_mutex_init_shared);
1699 	CU_ADD_TEST(suite, test_nvme_request_check_timeout);
1700 	CU_ADD_TEST(suite, test_nvme_wait_for_completion);
1701 	CU_ADD_TEST(suite, test_spdk_nvme_parse_func);
1702 	CU_ADD_TEST(suite, test_spdk_nvme_detach_async);
1703 	CU_ADD_TEST(suite, test_nvme_parse_addr);
1704 
1705 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
1706 	CU_cleanup_registry();
1707 	return num_failures;
1708 }
1709