/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"

#define UNIT_TEST_NO_VTOPHYS

#include "nvme/nvme_pcie.c"
#include "nvme/nvme_pcie_common.c"
#include "common/lib/nvme/common_stubs.h"

pid_t g_spdk_nvme_pid;
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);

DEFINE_STUB(nvme_get_quirks, uint64_t, (const struct spdk_pci_id *id), 0);

DEFINE_STUB(nvme_wait_for_completion, int,
	    (struct spdk_nvme_qpair *qpair,
	     struct nvme_completion_poll_status *status), 0);
DEFINE_STUB_V(nvme_completion_poll_cb, (void *arg, const struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvme_ctrlr_submit_admin_request, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct nvme_request *req), 0);

DEFINE_STUB(nvme_ctrlr_proc_get_devhandle, struct spdk_pci_device *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);
DEFINE_STUB(spdk_nvme_ctrlr_get_numa_id, int32_t, (struct spdk_nvme_ctrlr *ctrlr),
	    SPDK_ENV_NUMA_ID_ANY);

DEFINE_STUB(spdk_pci_device_unmap_bar, int, (struct spdk_pci_device *dev, uint32_t bar, void *addr),
	    0);
DEFINE_STUB(spdk_pci_device_attach, int, (struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb,
		void *enum_ctx, struct spdk_pci_addr *pci_address), 0);
DEFINE_STUB(spdk_pci_device_claim, int, (struct spdk_pci_device *dev), 0);
DEFINE_STUB_V(spdk_pci_device_unclaim, (struct spdk_pci_device *dev));
DEFINE_STUB_V(spdk_pci_device_detach, (struct spdk_pci_device *device));
DEFINE_STUB(spdk_pci_device_cfg_write16, int, (struct spdk_pci_device *dev, uint16_t value,
		uint32_t offset), 0);
DEFINE_STUB(spdk_pci_device_cfg_read16, int, (struct spdk_pci_device *dev, uint16_t *value,
		uint32_t offset), 0);
DEFINE_STUB(spdk_pci_device_get_id, struct spdk_pci_id, (struct spdk_pci_device *dev), {0});
DEFINE_STUB(spdk_pci_event_listen, int, (void), 0);
DEFINE_STUB(spdk_pci_register_error_handler, int, (spdk_pci_error_handler sighandler, void *ctx),
	    0);
DEFINE_STUB_V(spdk_pci_unregister_error_handler, (spdk_pci_error_handler sighandler));
DEFINE_STUB(spdk_pci_enumerate, int,
	    (struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb, void *enum_ctx),
	    -1);
DEFINE_STUB(spdk_pci_device_enable_interrupts, int, (struct spdk_pci_device *dev,
		uint32_t efd_count), 0);
DEFINE_STUB(spdk_pci_device_disable_interrupts, int, (struct spdk_pci_device *dev), 0);
DEFINE_STUB(spdk_pci_device_get_interrupt_efd_by_index, int, (struct spdk_pci_device *dev,
		uint32_t index), 0);

SPDK_LOG_REGISTER_COMPONENT(nvme)

struct dev_mem_resource {
	uint64_t phys_addr;
	uint64_t len;
	void *addr;
};

struct nvme_pcie_ut_bdev_io {
	struct iovec iovs[NVME_MAX_SGL_DESCRIPTORS];
	int iovpos;
};

struct nvme_driver *g_spdk_nvme_driver = NULL;

int
spdk_pci_device_map_bar(struct spdk_pci_device *dev, uint32_t bar,
			void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
{
	struct dev_mem_resource *dev_mem_res = (void *)dev;

	*mapped_addr = dev_mem_res->addr;
	*phys_addr = dev_mem_res->phys_addr;
	*size = dev_mem_res->len;

	return 0;
}

void
nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
{
	CU_ASSERT(ctrlr != NULL);
	if (hot_remove) {
		ctrlr->is_removed = true;
	}

	ctrlr->is_failed = true;
}

static uint64_t g_vtophys_size = 0;

DEFINE_RETURN_MOCK(spdk_vtophys, uint64_t);
uint64_t
spdk_vtophys(const void *buf, uint64_t *size)
{
	if (size && g_vtophys_size > 0) {
		*size = g_vtophys_size;
	}

	HANDLE_RETURN_MOCK(spdk_vtophys);

	return (uintptr_t)buf;
}

DEFINE_STUB(spdk_pci_device_get_addr, struct spdk_pci_addr, (struct spdk_pci_device *dev), {});
DEFINE_STUB(nvme_ctrlr_probe, int, (const struct spdk_nvme_transport_id *trid,
				    struct spdk_nvme_probe_ctx *probe_ctx, void *devhandle), 0);
DEFINE_STUB(spdk_pci_device_is_removed, bool, (struct spdk_pci_device *dev), false);
DEFINE_STUB(nvme_get_ctrlr_by_trid_unsafe, struct spdk_nvme_ctrlr *,
	    (const struct spdk_nvme_transport_id *trid, const char *hostnqn), NULL);
DEFINE_STUB(spdk_nvme_ctrlr_get_regs_csts, union spdk_nvme_csts_register,
	    (struct spdk_nvme_ctrlr *ctrlr), {});
DEFINE_STUB(nvme_ctrlr_get_process, struct spdk_nvme_ctrlr_process *,
	    (struct spdk_nvme_ctrlr *ctrlr, pid_t pid), NULL);
DEFINE_STUB(nvme_completion_is_retry, bool, (const struct spdk_nvme_cpl *cpl), false);
DEFINE_STUB_V(spdk_nvme_qpair_print_command, (struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_qpair_print_completion, (struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cpl *cpl));

static void
prp_list_prep(struct nvme_tracker *tr, struct nvme_request *req, uint32_t *prp_index)
{
	memset(req, 0, sizeof(*req));
	memset(tr, 0, sizeof(*tr));
	tr->req = req;
	tr->prp_sgl_bus_addr = 0xDEADBEEF;
	if (prp_index) {
		*prp_index = 0;
	}
}

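/*
 * PRP rules exercised below (per the NVMe spec, with a 4 KiB page size):
 * PRP1 may carry any dword-aligned offset into its page, but every later
 * entry must be page-aligned. One entry is needed per page touched; up to
 * two entries fit in prp1/prp2 directly, while three or more turn prp2
 * into a pointer to the PRP list at tr->prp_sgl_bus_addr (tr->u.prp).
 * For example, 0x2000 bytes at 0x100800 touch pages 0x100000, 0x101000
 * and 0x102000, so prp_index ends at 3 and the list is used.
 */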
static void
test_prp_list_append(void)
{
	struct nvme_request req;
	struct nvme_tracker tr;
	struct spdk_nvme_ctrlr ctrlr = {};
	uint32_t prp_index;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	/* Non-DWORD-aligned buffer (invalid) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100001, 0x1000,
					    0x1000) == -EFAULT);

	/* 512-byte buffer, 4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x200, 0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);

	/* 512-byte buffer, non-4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x108000, 0x200, 0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x108000);

	/* 4K buffer, 4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);

	/* 4K buffer, non-4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);

	/* 8K buffer, 4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x2000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);

	/* 8K buffer, non-4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x2000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 3);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);

	/* 12K buffer, 4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x3000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 3);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);

	/* 12K buffer, non-4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x3000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 4);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);
	CU_ASSERT(tr.u.prp[2] == 0x103000);

	/* Two 4K buffers, both 4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x900000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x900000);

	/* Two 4K buffers, first non-4K aligned, second 4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x900000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 3);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x900000);

	/* Two 4K buffers, both non-4K aligned (invalid) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x900800, 0x1000,
					    0x1000) == -EFAULT);
	CU_ASSERT(prp_index == 2);

	/* 4K buffer, 4K aligned, but vtophys fails */
	MOCK_SET(spdk_vtophys, SPDK_VTOPHYS_ERROR);
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x1000,
					    0x1000) == -EFAULT);
	MOCK_CLEAR(spdk_vtophys);

	/* Largest aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000,
					    (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == 0);
	CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);

	/* Largest non-4K-aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800,
					    NVME_MAX_PRP_LIST_ENTRIES * 0x1000, 0x1000) == 0);
	CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);

	/* Buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000,
					    (NVME_MAX_PRP_LIST_ENTRIES + 2) * 0x1000, 0x1000) == -EFAULT);

	/* Non-4K-aligned buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800,
					    (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == -EFAULT);
}

struct spdk_event_entry {
	struct spdk_pci_event		event;
	STAILQ_ENTRY(spdk_event_entry)	link;
};

static STAILQ_HEAD(, spdk_event_entry) g_events = STAILQ_HEAD_INITIALIZER(g_events);
static bool g_device_allowed = false;

int
spdk_pci_get_event(int fd, struct spdk_pci_event *event)
{
	struct spdk_event_entry *entry;

	if (STAILQ_EMPTY(&g_events)) {
		return 0;
	}

	entry = STAILQ_FIRST(&g_events);
	STAILQ_REMOVE_HEAD(&g_events, link);

	*event = entry->event;

	return 1;
}

int
spdk_pci_device_allow(struct spdk_pci_addr *pci_addr)
{
	g_device_allowed = true;

	return 0;
}

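/*
 * _nvme_pcie_hotplug_monitor() drains PCI uevents via spdk_pci_get_event()
 * (stubbed above to pop entries from g_events): an ADD event allows the
 * new device address for probing, while a REMOVE event looks the
 * controller up by traddr and fails it as hot-removed. It also polls
 * spdk_pci_device_is_removed() on attached controllers, covering VFIO
 * devices that disappear without emitting a uevent. Each path is
 * exercised in turn below.
 */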
static void
test_nvme_pcie_hotplug_monitor(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	struct spdk_event_entry entry = {};
	struct nvme_driver driver;
	pthread_mutexattr_t attr;
	struct spdk_nvme_probe_ctx test_nvme_probe_ctx = {};

	/* Initialize variables and the ctrlr */
	driver.initialized = true;
	driver.hotplug_fd = 123;
	CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
	CU_ASSERT(pthread_mutex_init(&pctrlr.ctrlr.ctrlr_lock, &attr) == 0);
	CU_ASSERT(pthread_mutex_init(&driver.lock, &attr) == 0);
	TAILQ_INIT(&driver.shared_attached_ctrlrs);
	g_spdk_nvme_driver = &driver;

	/* Case 1: SPDK_UEVENT_ADD / NVME_VFIO / NVME_UIO */
	entry.event.action = SPDK_UEVENT_ADD;
	spdk_pci_addr_parse(&entry.event.traddr, "0000:05:00.0");
	CU_ASSERT(STAILQ_EMPTY(&g_events));
	STAILQ_INSERT_TAIL(&g_events, &entry, link);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(STAILQ_EMPTY(&g_events));
	CU_ASSERT(g_device_allowed == true);
	g_device_allowed = false;

	/* Case 2: SPDK_UEVENT_REMOVE / NVME_UIO */
	entry.event.action = SPDK_UEVENT_REMOVE;
	spdk_pci_addr_parse(&entry.event.traddr, "0000:05:00.0");
	CU_ASSERT(STAILQ_EMPTY(&g_events));
	STAILQ_INSERT_TAIL(&g_events, &entry, link);

	MOCK_SET(nvme_get_ctrlr_by_trid_unsafe, &pctrlr.ctrlr);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(STAILQ_EMPTY(&g_events));
	CU_ASSERT(pctrlr.ctrlr.is_failed == true);
	CU_ASSERT(pctrlr.ctrlr.is_removed == true);
	pctrlr.ctrlr.is_failed = false;
	pctrlr.ctrlr.is_removed = false;
	MOCK_CLEAR(nvme_get_ctrlr_by_trid_unsafe);

	/* Case 3: SPDK_UEVENT_REMOVE / NVME_VFIO without an event */
	pctrlr.ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	snprintf(pctrlr.ctrlr.trid.traddr, sizeof(pctrlr.ctrlr.trid.traddr), "0000:02:00.0");
	pctrlr.ctrlr.remove_cb = NULL;
	pctrlr.ctrlr.is_failed = false;
	pctrlr.ctrlr.is_removed = false;
	TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &pctrlr.ctrlr, tailq);

	/* This would be set in the VFIO req notifier callback */
	MOCK_SET(spdk_pci_device_is_removed, true);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(STAILQ_EMPTY(&g_events));
	CU_ASSERT(pctrlr.ctrlr.is_failed == true);
	CU_ASSERT(pctrlr.ctrlr.is_removed == true);
	pctrlr.ctrlr.is_failed = false;
	pctrlr.ctrlr.is_removed = false;
	MOCK_CLEAR(spdk_pci_device_is_removed);

	/* Case 4: Removed device detected in another process */
	MOCK_SET(spdk_pci_device_is_removed, false);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(pctrlr.ctrlr.is_failed == false);

	MOCK_SET(spdk_pci_device_is_removed, true);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(pctrlr.ctrlr.is_failed == true);

	pthread_mutex_destroy(&driver.lock);
	pthread_mutex_destroy(&pctrlr.ctrlr.ctrlr_lock);
	pthread_mutexattr_destroy(&attr);
	g_spdk_nvme_driver = NULL;
}

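/*
 * nvme_pcie_qpair_need_event() implements the shadow-doorbell rule: an
 * actual MMIO doorbell write is only required when moving the doorbell
 * from 'old' to 'new_idx' crosses the controller's EventIdx, i.e. when
 * event_idx lies in [old, new_idx) under wraparound-safe 16-bit
 * arithmetic (essentially the virtio-style check
 * (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old)).
 * Moving 14 -> 15 therefore needs no event for event_idx 10, but does
 * for event_idx 14.
 */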
static void
test_shadow_doorbell_update(void)
{
	bool ret;

	/* nvme_pcie_qpair_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old) */
	ret = nvme_pcie_qpair_need_event(10, 15, 14);
	CU_ASSERT(ret == false);

	ret = nvme_pcie_qpair_need_event(14, 15, 14);
	CU_ASSERT(ret == true);
}

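/*
 * For a contiguous payload, the hardware SGL builder walks the buffer with
 * spdk_vtophys() (mocked above; g_vtophys_size caps how many bytes each
 * call reports as physically contiguous). If one translation covers the
 * whole payload, sgl1 becomes an inline data-block descriptor; otherwise
 * the descriptors land in tr->u.sgl and sgl1 becomes a last-segment
 * descriptor pointing at tr->prp_sgl_bus_addr.
 */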
static void
test_build_contig_hw_sgl_request(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request req = {};
	struct nvme_tracker tr = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	qpair.ctrlr = &ctrlr;
	/* Test 1: Payload covered by a single mapping */
	req.payload_size = 100;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xbeef0, NULL);
	g_vtophys_size = 100;
	MOCK_SET(spdk_vtophys, 0xDEADBEEF);

	rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);

	MOCK_CLEAR(spdk_vtophys);
	g_vtophys_size = 0;
	memset(&qpair, 0, sizeof(qpair));
	memset(&req, 0, sizeof(req));
	memset(&tr, 0, sizeof(tr));

	/* Test 2: Payload covered by a single mapping, but request is at an offset */
	qpair.ctrlr = &ctrlr;
	req.payload_size = 100;
	req.payload_offset = 50;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xbeef0, NULL);
	g_vtophys_size = 1000;
	MOCK_SET(spdk_vtophys, 0xDEADBEEF);

	rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);

	MOCK_CLEAR(spdk_vtophys);
	g_vtophys_size = 0;
	memset(&qpair, 0, sizeof(qpair));
	memset(&req, 0, sizeof(req));
	memset(&tr, 0, sizeof(tr));

	/* Test 3: Payload spans two mappings */
	qpair.ctrlr = &ctrlr;
	req.payload_size = 100;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xbeef0, NULL);
	g_vtophys_size = 60;
	tr.prp_sgl_bus_addr = 0xFF0FF;
	MOCK_SET(spdk_vtophys, 0xDEADBEEF);

	rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
	CU_ASSERT(req.cmd.dptr.sgl1.address == tr.prp_sgl_bus_addr);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 2 * sizeof(struct spdk_nvme_sgl_descriptor));
	CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[0].unkeyed.length == 60);
	CU_ASSERT(tr.u.sgl[0].address == 0xDEADBEEF);
	CU_ASSERT(tr.u.sgl[1].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[1].unkeyed.length == 40);
	CU_ASSERT(tr.u.sgl[1].address == 0xDEADBEEF);

	MOCK_CLEAR(spdk_vtophys);
	g_vtophys_size = 0;
	memset(&qpair, 0, sizeof(qpair));
	memset(&req, 0, sizeof(req));
	memset(&tr, 0, sizeof(tr));
}

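/*
 * Metadata rides along either via MPTR (one physically contiguous buffer;
 * cmd.psdt stays SGL_MPTR_CONTIG) or, when metadata SGLs are used, via a
 * descriptor placed sizeof(struct spdk_nvme_sgl_descriptor) bytes below
 * tr->prp_sgl_bus_addr, with cmd.psdt flipped to SGL_MPTR_SGL and cmd.mptr
 * pointing at that descriptor. Both variants, plus the non-contiguous
 * failure paths, are covered below.
 */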
static void
test_nvme_pcie_qpair_build_metadata(void)
{
	struct nvme_pcie_qpair pqpair = {};
	struct spdk_nvme_qpair *qpair = &pqpair.qpair;
	struct nvme_tracker tr = {};
	struct nvme_request req = {};
	struct spdk_nvme_ctrlr	ctrlr = {};
	int rc;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	tr.req = &req;
	qpair->ctrlr = &ctrlr;

	req.payload = NVME_PAYLOAD_CONTIG(NULL, (void *)0xDEADBEE0);
	req.md_offset = 0;
	req.md_size = 4096;
	/* nvme_pcie_qpair_build_metadata() expects cmd.psdt to be set to
	 * SPDK_NVME_PSDT_SGL_MPTR_CONTIG on entry; if the metadata is then
	 * built using an SGL, the function changes cmd.psdt to
	 * SPDK_NVME_PSDT_SGL_MPTR_SGL. Verify that this is indeed the case.
	 */
	req.cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	tr.prp_sgl_bus_addr = 0xDBADBEEF;
	MOCK_SET(spdk_vtophys, 0xDCADBEE0);

	rc = nvme_pcie_qpair_build_metadata(qpair, &tr, true, true, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_SGL);
	CU_ASSERT(tr.meta_sgl.address == 0xDCADBEE0);
	CU_ASSERT(tr.meta_sgl.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.meta_sgl.unkeyed.length == 4096);
	CU_ASSERT(tr.meta_sgl.unkeyed.subtype == 0);
	CU_ASSERT(req.cmd.mptr == (0xDBADBEEF - sizeof(struct spdk_nvme_sgl_descriptor)));

	/* Non-IOVA-contiguous metadata buffers should fail. */
	g_vtophys_size = 1024;
	req.cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	rc = nvme_pcie_qpair_build_metadata(qpair, &tr, true, true, true);
	CU_ASSERT(rc == -EINVAL);
	g_vtophys_size = 0;

	MOCK_CLEAR(spdk_vtophys);

	/* Build non-SGL metadata */
	MOCK_SET(spdk_vtophys, 0xDDADBEE0);

	rc = nvme_pcie_qpair_build_metadata(qpair, &tr, false, false, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.mptr == 0xDDADBEE0);

	/* Build non-SGL metadata while SGLs are supported */
	memset(&tr.meta_sgl, 0, sizeof(tr.meta_sgl));
	/* If SGLs are supported, but not used for the metadata, cmd.psdt
	 * must not be changed to SPDK_NVME_PSDT_SGL_MPTR_SGL.
	 */
	req.cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	rc = nvme_pcie_qpair_build_metadata(qpair, &tr, true, false, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tr.meta_sgl.address == 0);
	CU_ASSERT(tr.meta_sgl.unkeyed.length == 0);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
	CU_ASSERT(req.cmd.mptr == 0xDDADBEE0);

	/* Non-IOVA-contiguous metadata buffers should fail. */
	g_vtophys_size = 1024;
	rc = nvme_pcie_qpair_build_metadata(qpair, &tr, false, false, true);
	CU_ASSERT(rc == -EINVAL);
	g_vtophys_size = 0;

	MOCK_CLEAR(spdk_vtophys);
}

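/*
 * Minimal reset_sgl/next_sge callbacks for NVME_PAYLOAD_SGL, mimicking
 * what a bdev-layer caller would provide: reset_sgl() seeks to a byte
 * offset (which must land exactly on an iovec boundary here) and
 * next_sge() hands out one iovec per call.
 */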
static int
nvme_pcie_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
{
	struct nvme_pcie_ut_bdev_io *bio = cb_arg;
	struct iovec *iov;

	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_MAX_SGL_DESCRIPTORS);

	iov = &bio->iovs[bio->iovpos];

	*address = iov->iov_base;
	*length = iov->iov_len;
	bio->iovpos++;

	return 0;
}

static void
nvme_pcie_ut_reset_sgl(void *cb_arg, uint32_t offset)
{
	struct nvme_pcie_ut_bdev_io *bio = cb_arg;
	struct iovec *iov;

	for (bio->iovpos = 0; bio->iovpos < NVME_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
		iov = &bio->iovs[bio->iovpos];
		/* Offset must be aligned with the start of any SGL entry */
		if (offset == 0) {
			break;
		}

		SPDK_CU_ASSERT_FATAL(offset >= iov->iov_len);
		offset -= iov->iov_len;
	}

	SPDK_CU_ASSERT_FATAL(offset == 0);
	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_MAX_SGL_DESCRIPTORS);
}

static void
test_nvme_pcie_qpair_build_prps_sgl_request(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request req = {};
	struct nvme_tracker tr = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_pcie_ut_bdev_io bio = {};
	int rc;

	tr.req = &req;
	qpair.ctrlr = &ctrlr;
	req.payload = NVME_PAYLOAD_SGL(nvme_pcie_ut_reset_sgl, nvme_pcie_ut_next_sge, &bio, NULL);
	req.payload_size = 4096;
	ctrlr.page_size = 4096;
	bio.iovs[0].iov_base = (void *)0x100000;
	bio.iovs[0].iov_len = 4096;

	rc = nvme_pcie_qpair_build_prps_sgl_request(&qpair, &req, &tr, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
}

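/*
 * Each spdk_nvme_sgl_descriptor is 16 bytes, so the three-vector case
 * below expects sgl1 to describe a 3 * 16 == 48 byte last segment at
 * tr->prp_sgl_bus_addr, while the single-vector case collapses into an
 * inline data-block descriptor carried in sgl1 itself.
 */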
static void
test_nvme_pcie_qpair_build_hw_sgl_request(void)
{
	struct nvme_pcie_qpair pqpair = {};
	struct spdk_nvme_qpair *qpair = &pqpair.qpair;
	struct nvme_request req = {};
	struct nvme_tracker tr = {};
	struct nvme_pcie_ut_bdev_io bio = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	qpair->ctrlr = &ctrlr;
	req.payload = NVME_PAYLOAD_SGL(nvme_pcie_ut_reset_sgl, nvme_pcie_ut_next_sge, &bio, NULL);
	req.cmd.opc = SPDK_NVME_OPC_WRITE;
	tr.prp_sgl_bus_addr = 0xDAADBEE0;
	g_vtophys_size = 4096;

	/* Multiple vectors, 2k + 4k + 2k */
	req.payload_size = 8192;
	bio.iovpos = 3;
	bio.iovs[0].iov_base = (void *)0xDBADBEE0;
	bio.iovs[0].iov_len = 2048;
	bio.iovs[1].iov_base = (void *)0xDCADBEE0;
	bio.iovs[1].iov_len = 4096;
	bio.iovs[2].iov_base = (void *)0xDDADBEE0;
	bio.iovs[2].iov_len = 2048;

	rc = nvme_pcie_qpair_build_hw_sgl_request(qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[0].unkeyed.length == 2048);
	CU_ASSERT(tr.u.sgl[0].address == 0xDBADBEE0);
	CU_ASSERT(tr.u.sgl[0].unkeyed.subtype == 0);
	CU_ASSERT(tr.u.sgl[1].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[1].unkeyed.length == 4096);
	CU_ASSERT(tr.u.sgl[1].address == 0xDCADBEE0);
	CU_ASSERT(tr.u.sgl[2].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[2].unkeyed.length == 2048);
	CU_ASSERT(tr.u.sgl[2].address == 0xDDADBEE0);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDAADBEE0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 48);

	/* Single vector */
	memset(&tr, 0, sizeof(tr));
	memset(&bio, 0, sizeof(bio));
	memset(&req, 0, sizeof(req));
	req.payload = NVME_PAYLOAD_SGL(nvme_pcie_ut_reset_sgl, nvme_pcie_ut_next_sge, &bio, NULL);
	req.cmd.opc = SPDK_NVME_OPC_WRITE;
	req.payload_size = 4096;
	bio.iovpos = 1;
	bio.iovs[0].iov_base = (void *)0xDBADBEE0;
	bio.iovs[0].iov_len = 4096;

	rc = nvme_pcie_qpair_build_hw_sgl_request(qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[0].unkeyed.length == 4096);
	CU_ASSERT(tr.u.sgl[0].address == 0xDBADBEE0);
	CU_ASSERT(tr.u.sgl[0].unkeyed.subtype == 0);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDBADBEE0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 4096);
}

static void
test_nvme_pcie_qpair_build_contig_request(void)
{
	struct nvme_pcie_qpair pqpair = {};
	struct nvme_request req = {};
	struct nvme_tracker tr = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	pqpair.qpair.ctrlr = &ctrlr;
	ctrlr.page_size = 0x1000;

	/* 1 PRP, 4K aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload = NVME_PAYLOAD_CONTIG((void *)0x100000, NULL);
	req.payload_size = 0x1000;

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);

	/* 2 PRPs, non-4K aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload = NVME_PAYLOAD_CONTIG((void *)0x100000, NULL);
	req.payload_size = 0x1000;
	req.payload_offset = 0x800;

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);

	/* 3 PRPs, 4K aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload = NVME_PAYLOAD_CONTIG((void *)0x100000, NULL);
	req.payload_size = 0x3000;

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);

	/* Address not dword-aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload = NVME_PAYLOAD_CONTIG((void *)0x100001, NULL);
	req.payload_size = 0x3000;
	req.qpair = &pqpair.qpair;
	TAILQ_INIT(&pqpair.outstanding_tr);
	TAILQ_INSERT_TAIL(&pqpair.outstanding_tr, &tr, tq_list);

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == -EFAULT);
}

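/*
 * The register helpers below take raw byte offsets into the memory-mapped
 * register file: in the NVMe register map, offset 0 is the 8-byte CAP
 * register and offset 8 is the 4-byte VS register, so the two accesses
 * here do not overlap.
 */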
static void
test_nvme_pcie_ctrlr_regs_get_set(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	volatile struct spdk_nvme_registers regs = {};
	uint32_t value_4;
	uint64_t value_8;
	int rc;

	pctrlr.regs = &regs;

	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, 8, 4);
	CU_ASSERT(rc == 0);

	rc = nvme_pcie_ctrlr_get_reg_4(&pctrlr.ctrlr, 8, &value_4);
	CU_ASSERT(rc == 0);
	CU_ASSERT(value_4 == 4);

	rc = nvme_pcie_ctrlr_set_reg_8(&pctrlr.ctrlr, 0, 0x100000000);
	CU_ASSERT(rc == 0);

	rc = nvme_pcie_ctrlr_get_reg_8(&pctrlr.ctrlr, 0, &value_8);
	CU_ASSERT(rc == 0);
	CU_ASSERT(value_8 == 0x100000000);
}

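/*
 * CMB geometry comes from two registers: CMBSZ reports the size as SZ
 * units of (4 KiB << (4 * SZU)), and CMBLOC gives the BAR plus the offset
 * into it in the same units. With SZU == 0, SZ == 512 and OFST == 100
 * below, the CMB is 512 * 4096 bytes starting 100 * 4096 bytes into the
 * BAR.
 */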
static void
test_nvme_pcie_ctrlr_map_unmap_cmb(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	volatile struct spdk_nvme_registers regs = {};
	union spdk_nvme_cmbsz_register cmbsz = {};
	union spdk_nvme_cmbloc_register cmbloc = {};
	struct dev_mem_resource cmd_res = {};
	int rc;

	pctrlr.regs = &regs;
	pctrlr.devhandle = (void *)&cmd_res;
	cmd_res.addr = (void *)0x7f7c0080d000;
	cmd_res.len = 0x800000;
	cmd_res.phys_addr = 0xFC800000;
	/* Configure a CMB of 512 units of 4 KiB, at offset 100 units, with SQs unsupported */
	cmbsz.bits.sz = 512;
	cmbsz.bits.szu = 0;
	cmbsz.bits.sqs = 0;
	cmbloc.bits.bir = 0;
	cmbloc.bits.ofst = 100;

	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
				  cmbsz.raw);
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw),
				  cmbloc.raw);

	nvme_pcie_ctrlr_map_cmb(&pctrlr);
	CU_ASSERT(pctrlr.cmb.bar_va == (void *)0x7f7c0080d000);
	CU_ASSERT(pctrlr.cmb.bar_pa == 0xFC800000);
	CU_ASSERT(pctrlr.cmb.size == 512 * 4096);
	CU_ASSERT(pctrlr.cmb.current_offset == 4096 * 100);
	CU_ASSERT(pctrlr.ctrlr.opts.use_cmb_sqs == false);

	rc = nvme_pcie_ctrlr_unmap_cmb(&pctrlr);
	CU_ASSERT(rc == 0);

	/* Invalid mapping information */
	memset(&pctrlr.cmb, 0, sizeof(pctrlr.cmb));
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw), 0);
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw), 0);

	nvme_pcie_ctrlr_map_cmb(&pctrlr);
	CU_ASSERT(pctrlr.cmb.bar_va == NULL);
	CU_ASSERT(pctrlr.cmb.bar_pa == 0);
	CU_ASSERT(pctrlr.cmb.size == 0);
	CU_ASSERT(pctrlr.cmb.current_offset == 0);
	CU_ASSERT(pctrlr.ctrlr.opts.use_cmb_sqs == false);
}

static void
prepare_map_io_cmd(struct nvme_pcie_ctrlr *pctrlr)
{
	union spdk_nvme_cmbsz_register cmbsz = {};
	union spdk_nvme_cmbloc_register cmbloc = {};

	cmbsz.bits.sz = 512;
	cmbsz.bits.wds = 1;
	cmbsz.bits.rds = 1;

	nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
				  cmbsz.raw);
	nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw),
				  cmbloc.raw);

	pctrlr->cmb.bar_va = (void *)0x7F7C0080D000;
	pctrlr->cmb.bar_pa = 0xFC800000;
	pctrlr->cmb.current_offset = 1ULL << 22;
	pctrlr->cmb.size = (1ULL << 22) * 512;
	pctrlr->cmb.mem_register_addr = NULL;
	pctrlr->ctrlr.opts.use_cmb_sqs = false;
}

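/*
 * A rough sketch of the math checked in the happy path below: the
 * registerable window is the CMB clipped to 2 MiB (hugepage) boundaries,
 * from ceil_2MB(bar_va + current_offset) up to
 * floor_2MB(bar_va + current_offset + size). With bar_va 0x7F7C0080D000,
 * current_offset 0x400000 and size 0x80000000 that gives
 * [0x7F7C00E00000, 0x7F7C80C00000), hence a size of 0x7FE00000.
 */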
static void
test_nvme_pcie_ctrlr_map_io_cmb(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	volatile struct spdk_nvme_registers regs = {};
	union spdk_nvme_cmbsz_register cmbsz = {};
	void *mem_reg_addr = NULL;
	size_t size;
	int rc;

	pctrlr.regs = &regs;
	prepare_map_io_cmd(&pctrlr);

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	/* The CMB vaddr is rounded up, and the end of the CMB down, to 2 MiB alignment */
	CU_ASSERT(mem_reg_addr == (void *)0x7F7C00E00000);
	CU_ASSERT(size == 0x7FE00000);

	rc = nvme_pcie_ctrlr_unmap_io_cmb(&pctrlr.ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pctrlr.cmb.mem_register_addr == NULL);
	CU_ASSERT(pctrlr.cmb.mem_register_size == 0);

	/* cmb mem_register_addr not NULL */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.cmb.mem_register_addr = (void *)0xDEADBEEF;
	pctrlr.cmb.mem_register_size = 1024;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(size == 1024);
	CU_ASSERT(mem_reg_addr == (void *)0xDEADBEEF);

	/* cmb.bar_va is NULL */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.cmb.bar_va = NULL;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);

	/* Submission queues already use the CMB */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.ctrlr.opts.use_cmb_sqs = true;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);

	pctrlr.ctrlr.opts.use_cmb_sqs = false;

	/* Only SQS is supported */
	prepare_map_io_cmd(&pctrlr);
	cmbsz.bits.wds = 0;
	cmbsz.bits.rds = 0;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
				  cmbsz.raw);

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);

	/* CMB size is less than 4 MiB */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.cmb.size = 1ULL << 16;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);
}

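/*
 * Mapping the PMR programs the PMR memory-space control value, which is
 * split across the PMRMSCL/PMRMSCU register pair; bit 1 of the low half
 * is CMSE (controller memory space enable). Hence the expected 0xFC800002
 * below: the base address 0xFC800000 with CMSE set. Unmapping clears both
 * halves.
 */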
static void
test_nvme_pcie_ctrlr_map_unmap_pmr(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	volatile struct spdk_nvme_registers regs = {};
	union spdk_nvme_pmrcap_register pmrcap = {};
	struct dev_mem_resource cmd_res = {};
	int rc;

	pctrlr.regs = &regs;
	pctrlr.devhandle = (void *)&cmd_res;
	regs.cap.bits.pmrs = 1;
	cmd_res.addr = (void *)0x7F7C0080D000;
	cmd_res.len = 0x800000;
	cmd_res.phys_addr = 0xFC800000;
	pmrcap.bits.bir = 2;
	pmrcap.bits.cmss = 1;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr,
				  offsetof(struct spdk_nvme_registers, pmrcap.raw),
				  pmrcap.raw);

	nvme_pcie_ctrlr_map_pmr(&pctrlr);
	CU_ASSERT(pctrlr.regs->pmrmscu == 0);
	/* Controller memory space enable, bit 1 */
	CU_ASSERT(pctrlr.regs->pmrmscl.raw == 0xFC800002);
	CU_ASSERT(pctrlr.regs->pmrsts.raw == 0);
	CU_ASSERT(pctrlr.pmr.bar_va == (void *)0x7F7C0080D000);
	CU_ASSERT(pctrlr.pmr.bar_pa == 0xFC800000);
	CU_ASSERT(pctrlr.pmr.size == 0x800000);

	rc = nvme_pcie_ctrlr_unmap_pmr(&pctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pctrlr.regs->pmrmscu == 0);
	CU_ASSERT(pctrlr.regs->pmrmscl.raw == 0);

	/* Invalid pmrcap value */
	memset(&pctrlr, 0, sizeof(pctrlr));
	memset((void *)&regs, 0, sizeof(regs));
	memset(&cmd_res, 0, sizeof(cmd_res));

	pctrlr.regs = &regs;
	pctrlr.devhandle = (void *)&cmd_res;
	regs.cap.bits.pmrs = 1;
	cmd_res.addr = (void *)0x7F7C0080D000;
	cmd_res.len = 0x800000;
	cmd_res.phys_addr = 0xFC800000;
	pmrcap.raw = 0;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr,
				  offsetof(struct spdk_nvme_registers, pmrcap.raw),
				  pmrcap.raw);

	nvme_pcie_ctrlr_map_pmr(&pctrlr);
	CU_ASSERT(pctrlr.pmr.bar_va == NULL);
	CU_ASSERT(pctrlr.pmr.bar_pa == 0);
	CU_ASSERT(pctrlr.pmr.size == 0);
}

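/*
 * nvme_pcie_ctrlr_config_pmr() toggles PMRCTL.EN and then waits on
 * PMRSTS.NRDY for the transition (within the PMRCAP.PMRTO timeout): NRDY
 * must read 0 after an enable and 1 after a disable, which is why the
 * test flips pmrsts.bits.nrdy between the two cases. Requesting a state
 * the PMR is already in fails with -EINVAL, as the last case checks.
 */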
static void
test_nvme_pcie_ctrlr_config_pmr(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	union spdk_nvme_pmrcap_register pmrcap = {};
	union spdk_nvme_pmrsts_register pmrsts = {};
	union spdk_nvme_cap_register	cap = {};
	union spdk_nvme_pmrctl_register pmrctl = {};
	volatile struct spdk_nvme_registers regs = {};
	int rc;

	/* Enable via PMRCTL */
	pctrlr.regs = &regs;
	pmrcap.bits.pmrtu = 0;
	pmrcap.bits.pmrto = 1;
	pmrsts.bits.nrdy = false;
	pmrctl.bits.en = 0;
	cap.bits.pmrs = 1;

	rc = nvme_pcie_ctrlr_set_pmrctl(&pctrlr, &pmrctl);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_reg_8(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
				       cap.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, pmrcap.raw),
				       pmrcap.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, pmrsts.raw),
				       pmrsts.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	rc = nvme_pcie_ctrlr_config_pmr(&pctrlr.ctrlr, true);
	CU_ASSERT(rc == 0);
	rc = nvme_pcie_ctrlr_get_pmrctl(&pctrlr, &pmrctl);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pmrctl.bits.en == true);

	/* Disable via PMRCTL */
	pmrsts.bits.nrdy = true;
	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, pmrsts.raw),
				       pmrsts.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_pmrctl(&pctrlr, &pmrctl);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	rc = nvme_pcie_ctrlr_config_pmr(&pctrlr.ctrlr, false);
	CU_ASSERT(rc == 0);
	rc = nvme_pcie_ctrlr_get_pmrctl(&pctrlr, &pmrctl);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pmrctl.bits.en == false);

	/* Disabling an already-disabled PMR fails */
	rc = nvme_pcie_ctrlr_config_pmr(&pctrlr.ctrlr, false);
	CU_ASSERT(rc == -EINVAL);
}

static void
map_io_pmr_init(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_pmrcap_register *pmrcap)
{
	pmrcap->raw = 0;
	pmrcap->bits.rds = 1;
	pmrcap->bits.wds = 1;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, pmrcap.raw),
				  pmrcap->raw);
	pctrlr->regs->cap.bits.pmrs = 1;
	pctrlr->pmr.mem_register_size = 0;
	pctrlr->pmr.mem_register_addr = NULL;
	pctrlr->pmr.bar_va = (void *)0x7F7C00E30000;
	pctrlr->pmr.size = (1 << 22) * 128;
}

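/*
 * The same 2 MiB clipping as in the CMB case applies: for the final,
 * successful mapping the test expects ceil_2MB(0x7F7C00E30000) ==
 * 0x7F7C01000000 as the start and floor_2MB(0x7F7C00E30000 + 0x20000000)
 * minus that start, i.e. 0x1FE00000, as the registerable size.
 */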
static void
test_nvme_pcie_ctrlr_map_io_pmr(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	struct spdk_nvme_ctrlr *ctrlr;
	volatile struct spdk_nvme_registers regs = {};
	union spdk_nvme_pmrcap_register pmrcap;
	void *mem_reg_addr = NULL;
	size_t rt_size = 0;

	ctrlr = &pctrlr.ctrlr;
	pctrlr.regs = &regs;

	/* PMR is not supported by the controller */
	map_io_pmr_init(&pctrlr, &pmrcap);
	regs.cap.bits.pmrs = 0;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_pmr(ctrlr, &rt_size);
	CU_ASSERT(mem_reg_addr == NULL);

	/* mem_register_addr not NULL */
	map_io_pmr_init(&pctrlr, &pmrcap);
	pctrlr.pmr.mem_register_addr = (void *)0xDEADBEEF;
	pctrlr.pmr.mem_register_size = 1024;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_pmr(ctrlr, &rt_size);
	CU_ASSERT(rt_size == 1024);
	CU_ASSERT(mem_reg_addr == (void *)0xDEADBEEF);

	/* PMR not available */
	map_io_pmr_init(&pctrlr, &pmrcap);
	pctrlr.pmr.bar_va = NULL;
	pctrlr.pmr.mem_register_addr = NULL;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_pmr(ctrlr, &rt_size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(rt_size == 0);

	/* Neither WDS nor RDS is supported */
	map_io_pmr_init(&pctrlr, &pmrcap);
	pmrcap.bits.rds = 0;
	pmrcap.bits.wds = 0;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, pmrcap.raw),
				  pmrcap.raw);

	mem_reg_addr = nvme_pcie_ctrlr_map_io_pmr(ctrlr, &rt_size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(rt_size == 0);

	/* PMR is less than 4 MiB in size, so PMR mapping is aborted */
	map_io_pmr_init(&pctrlr, &pmrcap);
	pctrlr.pmr.size = (1ULL << 20);

	mem_reg_addr = nvme_pcie_ctrlr_map_io_pmr(ctrlr, &rt_size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(rt_size == 0);

	/* All parameters valid; mapping succeeds */
	map_io_pmr_init(&pctrlr, &pmrcap);

	mem_reg_addr = nvme_pcie_ctrlr_map_io_pmr(ctrlr, &rt_size);
	CU_ASSERT(mem_reg_addr == (void *)0x7F7C01000000);
	CU_ASSERT(rt_size == 0x1FE00000);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvme_pcie", NULL, NULL);
	CU_ADD_TEST(suite, test_prp_list_append);
	CU_ADD_TEST(suite, test_nvme_pcie_hotplug_monitor);
	CU_ADD_TEST(suite, test_shadow_doorbell_update);
	CU_ADD_TEST(suite, test_build_contig_hw_sgl_request);
	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_metadata);
	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_prps_sgl_request);
	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_hw_sgl_request);
	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_contig_request);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_regs_get_set);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_map_unmap_cmb);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_map_io_cmb);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_map_unmap_pmr);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_config_pmr);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_map_io_pmr);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}