xref: /spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c (revision ee32a82bfd3ff5b1a10ed775ee06f0eaffce60eb)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_internal/cunit.h"
10 
11 #define UNIT_TEST_NO_VTOPHYS
12 
13 #include "nvme/nvme_pcie.c"
14 #include "nvme/nvme_pcie_common.c"
15 #include "common/lib/nvme/common_stubs.h"
16 
pid_t g_spdk_nvme_pid;

/* Env memory registration stubs: report success, do nothing. */
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);

DEFINE_STUB(nvme_get_quirks, uint64_t, (const struct spdk_pci_id *id), 0);

/* Admin-path stubs: every request/poll "completes" successfully. */
DEFINE_STUB(nvme_wait_for_completion, int,
	    (struct spdk_nvme_qpair *qpair,
	     struct nvme_completion_poll_status *status), 0);
DEFINE_STUB_V(nvme_completion_poll_cb, (void *arg, const struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvme_ctrlr_submit_admin_request, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct nvme_request *req), 0);

DEFINE_STUB(nvme_ctrlr_proc_get_devhandle, struct spdk_pci_device *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);
DEFINE_STUB(spdk_nvme_ctrlr_get_numa_id, int32_t, (struct spdk_nvme_ctrlr *ctrlr),
	    SPDK_ENV_NUMA_ID_ANY);

/* PCI access-layer stubs: all operations succeed with no side effects. */
DEFINE_STUB(spdk_pci_device_unmap_bar, int, (struct spdk_pci_device *dev, uint32_t bar, void *addr),
	    0);
DEFINE_STUB(spdk_pci_device_attach, int, (struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb,
		void *enum_ctx, struct spdk_pci_addr *pci_address), 0);
DEFINE_STUB(spdk_pci_device_claim, int, (struct spdk_pci_device *dev), 0);
DEFINE_STUB_V(spdk_pci_device_unclaim, (struct spdk_pci_device *dev));
DEFINE_STUB_V(spdk_pci_device_detach, (struct spdk_pci_device *device));
DEFINE_STUB(spdk_pci_device_cfg_write16, int, (struct spdk_pci_device *dev, uint16_t value,
		uint32_t offset), 0);
DEFINE_STUB(spdk_pci_device_cfg_read16, int, (struct spdk_pci_device *dev, uint16_t *value,
		uint32_t offset), 0);
DEFINE_STUB(spdk_pci_device_get_id, struct spdk_pci_id, (struct spdk_pci_device *dev), {0});
DEFINE_STUB(spdk_pci_event_listen, int, (void), 0);
DEFINE_STUB(spdk_pci_register_error_handler, int, (spdk_pci_error_handler sighandler, void *ctx),
	    0);
DEFINE_STUB_V(spdk_pci_unregister_error_handler, (spdk_pci_error_handler sighandler));
/* Enumeration fails by default (-1) so probe paths can be short-circuited. */
DEFINE_STUB(spdk_pci_enumerate, int,
	    (struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb, void *enum_ctx),
	    -1);

SPDK_LOG_REGISTER_COMPONENT(nvme)
56 SPDK_LOG_REGISTER_COMPONENT(nvme)
57 
/*
 * Fake BAR description. Tests hand a pointer to this struct where a
 * struct spdk_pci_device * is expected; spdk_pci_device_map_bar() below
 * casts it back, so the field layout must not change.
 */
struct dev_mem_resource {
	uint64_t phys_addr;
	uint64_t len;
	void *addr;
};
63 
/* Minimal bdev_io stand-in: an iovec array plus the SGL cursor used by the
 * reset_sgl/next_sge callbacks below. */
struct nvme_pcie_ut_bdev_io {
	struct iovec iovs[NVME_MAX_SGL_DESCRIPTORS];
	int iovpos;
};

/* Tests that need a driver instance point this at a stack-local struct. */
struct nvme_driver *g_spdk_nvme_driver = NULL;
70 
71 int
72 spdk_pci_device_map_bar(struct spdk_pci_device *dev, uint32_t bar,
73 			void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
74 {
75 	struct dev_mem_resource *dev_mem_res = (void *)dev;
76 
77 	*mapped_addr = dev_mem_res->addr;
78 	*phys_addr = dev_mem_res->phys_addr;
79 	*size = dev_mem_res->len;
80 
81 	return 0;
82 }
83 
84 void
85 nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
86 {
87 	CU_ASSERT(ctrlr != NULL);
88 	if (hot_remove) {
89 		ctrlr->is_removed = true;
90 	}
91 
92 	ctrlr->is_failed = true;
93 }
94 
/* When non-zero, the mapping size reported by the spdk_vtophys() mock. */
static uint64_t g_vtophys_size = 0;

DEFINE_RETURN_MOCK(spdk_vtophys, uint64_t);
/*
 * vtophys mock: identity translation (phys == virt) unless a return value
 * was pinned with MOCK_SET. g_vtophys_size lets tests cap the contiguous
 * mapping size to force multi-segment SGL paths.
 */
uint64_t
spdk_vtophys(const void *buf, uint64_t *size)
{
	if (size && g_vtophys_size > 0) {
		*size = g_vtophys_size;
	}

	/* Returns the MOCK_SET value here, if one is armed. */
	HANDLE_RETURN_MOCK(spdk_vtophys);

	return (uintptr_t)buf;
}
109 
/* Remaining probe/qpair stubs; several are overridden per-test via MOCK_SET. */
DEFINE_STUB(spdk_pci_device_get_addr, struct spdk_pci_addr, (struct spdk_pci_device *dev), {});
DEFINE_STUB(nvme_ctrlr_probe, int, (const struct spdk_nvme_transport_id *trid,
				    struct spdk_nvme_probe_ctx *probe_ctx, void *devhandle), 0);
DEFINE_STUB(spdk_pci_device_is_removed, bool, (struct spdk_pci_device *dev), false);
DEFINE_STUB(nvme_get_ctrlr_by_trid_unsafe, struct spdk_nvme_ctrlr *,
	    (const struct spdk_nvme_transport_id *trid, const char *hostnqn), NULL);
DEFINE_STUB(spdk_nvme_ctrlr_get_regs_csts, union spdk_nvme_csts_register,
	    (struct spdk_nvme_ctrlr *ctrlr), {});
DEFINE_STUB(nvme_ctrlr_get_process, struct spdk_nvme_ctrlr_process *,
	    (struct spdk_nvme_ctrlr *ctrlr, pid_t pid), NULL);
DEFINE_STUB(nvme_completion_is_retry, bool, (const struct spdk_nvme_cpl *cpl), false);
DEFINE_STUB_V(spdk_nvme_qpair_print_command, (struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_qpair_print_completion, (struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cpl *cpl));
125 
126 static void
127 prp_list_prep(struct nvme_tracker *tr, struct nvme_request *req, uint32_t *prp_index)
128 {
129 	memset(req, 0, sizeof(*req));
130 	memset(tr, 0, sizeof(*tr));
131 	tr->req = req;
132 	tr->prp_sgl_bus_addr = 0xDEADBEEF;
133 	if (prp_index) {
134 		*prp_index = 0;
135 	}
136 }
137 
/*
 * Exercise nvme_pcie_prp_list_append() across buffer sizes/alignments:
 * PRP1-only, PRP1+PRP2, and PRP-list cases, plus the -EFAULT error paths
 * (unaligned start address, vtophys failure, list overflow).
 * Page size is fixed at 0x1000 throughout; addresses are synthetic and
 * translated 1:1 by the spdk_vtophys() mock above.
 */
static void
test_prp_list_append(void)
{
	struct nvme_request req;
	struct nvme_tracker tr;
	struct spdk_nvme_ctrlr ctrlr = {};
	uint32_t prp_index;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	/* Non-DWORD-aligned buffer (invalid) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100001, 0x1000,
					    0x1000) == -EFAULT);

	/* 512-byte buffer, 4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x200, 0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);

	/* 512-byte buffer, non-4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x108000, 0x200, 0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x108000);

	/* 4K buffer, 4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);

	/* 4K buffer, non-4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);

	/* 8K buffer, 4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x2000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);

	/* 8K buffer, non-4K aligned: needs 3 pages, so PRP2 becomes a list */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x2000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 3);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);

	/* 12K buffer, 4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x3000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 3);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);

	/* 12K buffer, non-4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x3000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 4);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);
	CU_ASSERT(tr.u.prp[2] == 0x103000);

	/* Two 4K buffers, both 4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x900000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x900000);

	/* Two 4K buffers, first non-4K aligned, second 4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x900000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 3);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x900000);

	/* Two 4K buffers, both non-4K aligned (invalid: second must start page-aligned) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x900800, 0x1000,
					    0x1000) == -EFAULT);
	CU_ASSERT(prp_index == 2);

	/* 4K buffer, 4K aligned, but vtophys fails */
	MOCK_SET(spdk_vtophys, SPDK_VTOPHYS_ERROR);
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x1000,
					    0x1000) == -EFAULT);
	MOCK_CLEAR(spdk_vtophys);

	/* Largest aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000,
					    (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == 0);
	CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);

	/* Largest non-4K-aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800,
					    NVME_MAX_PRP_LIST_ENTRIES * 0x1000, 0x1000) == 0);
	CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);

	/* Buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000,
					    (NVME_MAX_PRP_LIST_ENTRIES + 2) * 0x1000, 0x1000) == -EFAULT);

	/* Non-4K-aligned buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800,
					    (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == -EFAULT);
}
280 
/* Queue node for fake PCI uevents consumed by the spdk_pci_get_event() mock. */
struct spdk_event_entry {
	struct spdk_pci_event		event;
	STAILQ_ENTRY(spdk_event_entry)	link;
};

/* Pending fake uevents, drained one per spdk_pci_get_event() call. */
static STAILQ_HEAD(, spdk_event_entry) g_events = STAILQ_HEAD_INITIALIZER(g_events);
/* Set by the spdk_pci_device_allow() mock; checked/cleared by the tests. */
static bool g_device_allowed = false;
288 
289 int
290 spdk_pci_get_event(int fd, struct spdk_pci_event *event)
291 {
292 	struct spdk_event_entry *entry;
293 
294 	if (STAILQ_EMPTY(&g_events)) {
295 		return 0;
296 	}
297 
298 	entry = STAILQ_FIRST(&g_events);
299 	STAILQ_REMOVE_HEAD(&g_events, link);
300 
301 	*event = entry->event;
302 
303 	return 1;
304 }
305 
306 int
307 spdk_pci_device_allow(struct spdk_pci_addr *pci_addr)
308 {
309 	g_device_allowed = true;
310 
311 	return 0;
312 }
313 
/*
 * Drive _nvme_pcie_hotplug_monitor() through hot-add, hot-remove (uevent),
 * vfio-style removal without a uevent, and removal detected by another
 * process. Events are injected via the g_events queue; controller lookup
 * and removal state are controlled with MOCK_SET on the stubs above.
 */
static void
test_nvme_pcie_hotplug_monitor(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	struct spdk_event_entry entry = {};
	struct nvme_driver driver;
	pthread_mutexattr_t attr;
	struct spdk_nvme_probe_ctx test_nvme_probe_ctx = {};

	/* Initiate variables and ctrlr */
	driver.initialized = true;
	driver.hotplug_fd = 123;
	CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
	CU_ASSERT(pthread_mutex_init(&pctrlr.ctrlr.ctrlr_lock, &attr) == 0);
	CU_ASSERT(pthread_mutex_init(&driver.lock, &attr) == 0);
	TAILQ_INIT(&driver.shared_attached_ctrlrs);
	g_spdk_nvme_driver = &driver;

	/* Case 1:  SPDK_NVME_UEVENT_ADD/ NVME_VFIO / NVME_UIO */
	entry.event.action = SPDK_UEVENT_ADD;
	spdk_pci_addr_parse(&entry.event.traddr, "0000:05:00.0");
	CU_ASSERT(STAILQ_EMPTY(&g_events));
	STAILQ_INSERT_TAIL(&g_events, &entry, link);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	/* Event must be consumed and the device allow-listed for attach. */
	CU_ASSERT(STAILQ_EMPTY(&g_events));
	CU_ASSERT(g_device_allowed == true);
	g_device_allowed = false;

	/* Case 2: SPDK_NVME_UEVENT_REMOVE/ NVME_UIO */
	entry.event.action = SPDK_UEVENT_REMOVE;
	spdk_pci_addr_parse(&entry.event.traddr, "0000:05:00.0");
	CU_ASSERT(STAILQ_EMPTY(&g_events));
	STAILQ_INSERT_TAIL(&g_events, &entry, link);

	/* Pretend the removed traddr resolves to our controller. */
	MOCK_SET(nvme_get_ctrlr_by_trid_unsafe, &pctrlr.ctrlr);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(STAILQ_EMPTY(&g_events));
	CU_ASSERT(pctrlr.ctrlr.is_failed == true);
	CU_ASSERT(pctrlr.ctrlr.is_removed == true);
	pctrlr.ctrlr.is_failed = false;
	pctrlr.ctrlr.is_removed = false;
	MOCK_CLEAR(nvme_get_ctrlr_by_trid_unsafe);

	/* Case 3: SPDK_NVME_UEVENT_REMOVE/ NVME_VFIO without event */
	pctrlr.ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	snprintf(pctrlr.ctrlr.trid.traddr, sizeof(pctrlr.ctrlr.trid.traddr), "0000:02:00.0");
	pctrlr.ctrlr.remove_cb = NULL;
	pctrlr.ctrlr.is_failed = false;
	pctrlr.ctrlr.is_removed = false;
	TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &pctrlr.ctrlr, tailq);

	/* This should be set in the vfio req notifier cb */
	MOCK_SET(spdk_pci_device_is_removed, true);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(STAILQ_EMPTY(&g_events));
	CU_ASSERT(pctrlr.ctrlr.is_failed == true);
	CU_ASSERT(pctrlr.ctrlr.is_removed == true);
	pctrlr.ctrlr.is_failed = false;
	pctrlr.ctrlr.is_removed = false;
	MOCK_CLEAR(spdk_pci_device_is_removed);

	/* Case 4:  Removed device detected in another process  */
	MOCK_SET(spdk_pci_device_is_removed, false);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(pctrlr.ctrlr.is_failed == false);

	MOCK_SET(spdk_pci_device_is_removed, true);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(pctrlr.ctrlr.is_failed == true);

	pthread_mutex_destroy(&driver.lock);
	pthread_mutex_destroy(&pctrlr.ctrlr.ctrlr_lock);
	pthread_mutexattr_destroy(&attr);
	g_spdk_nvme_driver = NULL;
}
399 
/*
 * Verify nvme_pcie_qpair_need_event(event_idx, new_idx, old):
 * an event is needed only when event_idx falls within (old, new].
 */
static void
test_shadow_doorbell_update(void)
{
	/* event_idx 10 is outside (14, 15]: no event */
	CU_ASSERT(nvme_pcie_qpair_need_event(10, 15, 14) == false);

	/* event_idx 14... wait, 14 is the old value; (14, 15] contains 15 only,
	 * but the implementation treats event_idx == old as crossed: event */
	CU_ASSERT(nvme_pcie_qpair_need_event(14, 15, 14) == true);
}
412 
/*
 * Exercise nvme_pcie_qpair_build_contig_hw_sgl_request() for a contiguous
 * payload: single mapping, single mapping with payload offset, and a payload
 * split across two vtophys mappings (forcing a two-entry SGL segment).
 * g_vtophys_size caps the contiguous size reported by the vtophys mock.
 */
static void
test_build_contig_hw_sgl_request(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request req = {};
	struct nvme_tracker tr = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	qpair.ctrlr = &ctrlr;
	/* Test 1: Payload covered by a single mapping */
	req.payload_size = 100;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xbeef0, NULL);
	g_vtophys_size = 100;
	MOCK_SET(spdk_vtophys, 0xDEADBEEF);

	rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
	CU_ASSERT(rc == 0);
	/* Single mapping: inline data-block descriptor in sgl1, no segment list. */
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);

	MOCK_CLEAR(spdk_vtophys);
	g_vtophys_size = 0;
	memset(&qpair, 0, sizeof(qpair));
	memset(&req, 0, sizeof(req));
	memset(&tr, 0, sizeof(tr));

	/* Test 2: Payload covered by a single mapping, but request is at an offset */
	qpair.ctrlr = &ctrlr;
	req.payload_size = 100;
	req.payload_offset = 50;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xbeef0, NULL);
	g_vtophys_size = 1000;
	MOCK_SET(spdk_vtophys, 0xDEADBEEF);

	rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);

	MOCK_CLEAR(spdk_vtophys);
	g_vtophys_size = 0;
	memset(&qpair, 0, sizeof(qpair));
	memset(&req, 0, sizeof(req));
	memset(&tr, 0, sizeof(tr));

	/* Test 3: Payload spans two mappings (60 + 40 bytes) */
	qpair.ctrlr = &ctrlr;
	req.payload_size = 100;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xbeef0, NULL);
	g_vtophys_size = 60;
	tr.prp_sgl_bus_addr = 0xFF0FF;
	MOCK_SET(spdk_vtophys, 0xDEADBEEF);

	rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
	CU_ASSERT(rc == 0);
	/* sgl1 now points at a two-descriptor segment in the tracker. */
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
	CU_ASSERT(req.cmd.dptr.sgl1.address == tr.prp_sgl_bus_addr);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 2 * sizeof(struct spdk_nvme_sgl_descriptor));
	CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[0].unkeyed.length == 60);
	CU_ASSERT(tr.u.sgl[0].address == 0xDEADBEEF);
	CU_ASSERT(tr.u.sgl[1].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[1].unkeyed.length == 40);
	CU_ASSERT(tr.u.sgl[1].address == 0xDEADBEEF);

	MOCK_CLEAR(spdk_vtophys);
	g_vtophys_size = 0;
	memset(&qpair, 0, sizeof(qpair));
	memset(&req, 0, sizeof(req));
	memset(&tr, 0, sizeof(tr));
}
488 
/*
 * Exercise nvme_pcie_qpair_build_metadata() in SGL and MPTR (non-SGL) modes,
 * including the -EINVAL path when the metadata buffer is not IOVA-contiguous
 * (g_vtophys_size caps the mapping below md_size to simulate that).
 */
static void
test_nvme_pcie_qpair_build_metadata(void)
{
	struct nvme_pcie_qpair pqpair = {};
	struct spdk_nvme_qpair *qpair = &pqpair.qpair;
	struct nvme_tracker tr = {};
	struct nvme_request req = {};
	struct spdk_nvme_ctrlr	ctrlr = {};
	int rc;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	tr.req = &req;
	qpair->ctrlr = &ctrlr;

	req.payload = NVME_PAYLOAD_CONTIG(NULL, (void *)0xDEADBEE0);
	req.md_offset = 0;
	req.md_size = 4096;
	/* The nvme_pcie_qpair_build_metadata() function expects the cmd.psdt
	 * is set to SPDK_NVME_PSDT_SGL_MPTR_CONTIG, and then if metadata is
	 * built using SGL, cmd.psdt is changed to SPDK_NVME_PSDT_SGL_MPTR_SGL
	 * by this function. We need to verify if this indeed is the case.
	 */
	req.cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	tr.prp_sgl_bus_addr = 0xDBADBEEF;
	MOCK_SET(spdk_vtophys, 0xDCADBEE0);

	rc = nvme_pcie_qpair_build_metadata(qpair, &tr, true, true, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_SGL);
	CU_ASSERT(tr.meta_sgl.address == 0xDCADBEE0);
	CU_ASSERT(tr.meta_sgl.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.meta_sgl.unkeyed.length == 4096);
	CU_ASSERT(tr.meta_sgl.unkeyed.subtype == 0);
	/* MPTR points at the metadata descriptor, placed just before the SGL area. */
	CU_ASSERT(req.cmd.mptr == (0xDBADBEEF - sizeof(struct spdk_nvme_sgl_descriptor)));

	/* Non-IOVA contiguous metadata buffers should fail. */
	g_vtophys_size = 1024;
	req.cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	rc = nvme_pcie_qpair_build_metadata(qpair, &tr, true, true, true);
	CU_ASSERT(rc == -EINVAL);
	g_vtophys_size = 0;

	MOCK_CLEAR(spdk_vtophys);

	/* Build non sgl metadata */
	MOCK_SET(spdk_vtophys, 0xDDADBEE0);

	rc = nvme_pcie_qpair_build_metadata(qpair, &tr, false, false, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.mptr == 0xDDADBEE0);

	/* Build non sgl metadata while sgls are supported */
	memset(&tr.meta_sgl, 0, sizeof(tr.meta_sgl));
	/* If SGLs are supported, but not in metadata, the cmd.psdt
	 * shall not be changed to SPDK_NVME_PSDT_SGL_MPTR_SGL
	 */
	req.cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	rc = nvme_pcie_qpair_build_metadata(qpair, &tr, true, false, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tr.meta_sgl.address == 0);
	CU_ASSERT(tr.meta_sgl.unkeyed.length == 0);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
	CU_ASSERT(req.cmd.mptr == 0xDDADBEE0);

	/* Non-IOVA contiguous metadata buffers should fail. */
	g_vtophys_size = 1024;
	rc = nvme_pcie_qpair_build_metadata(qpair, &tr, false, false, true);
	CU_ASSERT(rc == -EINVAL);
	g_vtophys_size = 0;

	MOCK_CLEAR(spdk_vtophys);
}
561 
562 static int
563 nvme_pcie_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
564 {
565 	struct nvme_pcie_ut_bdev_io *bio = cb_arg;
566 	struct iovec *iov;
567 
568 	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_MAX_SGL_DESCRIPTORS);
569 
570 	iov = &bio->iovs[bio->iovpos];
571 
572 	*address = iov->iov_base;
573 	*length = iov->iov_len;
574 	bio->iovpos++;
575 
576 	return 0;
577 }
578 
579 static void
580 nvme_pcie_ut_reset_sgl(void *cb_arg, uint32_t offset)
581 {
582 	struct nvme_pcie_ut_bdev_io *bio = cb_arg;
583 	struct iovec *iov;
584 
585 	for (bio->iovpos = 0; bio->iovpos < NVME_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
586 		iov = &bio->iovs[bio->iovpos];
587 		/* Offset must be aligned with the start of any SGL entry */
588 		if (offset == 0) {
589 			break;
590 		}
591 
592 		SPDK_CU_ASSERT_FATAL(offset >= iov->iov_len);
593 		offset -= iov->iov_len;
594 	}
595 
596 	SPDK_CU_ASSERT_FATAL(offset == 0);
597 	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_MAX_SGL_DESCRIPTORS);
598 }
599 
600 static void
601 test_nvme_pcie_qpair_build_prps_sgl_request(void)
602 {
603 	struct spdk_nvme_qpair qpair = {};
604 	struct nvme_request req = {};
605 	struct nvme_tracker tr = {};
606 	struct spdk_nvme_ctrlr ctrlr = {};
607 	struct nvme_pcie_ut_bdev_io bio = {};
608 	int rc;
609 
610 	tr.req = &req;
611 	qpair.ctrlr = &ctrlr;
612 	req.payload = NVME_PAYLOAD_SGL(nvme_pcie_ut_reset_sgl, nvme_pcie_ut_next_sge, &bio, NULL);
613 	req.payload_size = 4096;
614 	ctrlr.page_size = 4096;
615 	bio.iovs[0].iov_base = (void *)0x100000;
616 	bio.iovs[0].iov_len = 4096;
617 
618 	rc = nvme_pcie_qpair_build_prps_sgl_request(&qpair, &req, &tr, NULL);
619 	CU_ASSERT(rc == 0);
620 	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
621 }
622 
623 static void
624 test_nvme_pcie_qpair_build_hw_sgl_request(void)
625 {
626 	struct nvme_pcie_qpair pqpair = {};
627 	struct spdk_nvme_qpair *qpair = &pqpair.qpair;
628 	struct nvme_request req = {};
629 	struct nvme_tracker tr = {};
630 	struct nvme_pcie_ut_bdev_io bio = {};
631 	struct spdk_nvme_ctrlr ctrlr = {};
632 	int rc;
633 
634 	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
635 	qpair->ctrlr = &ctrlr;
636 	req.payload = NVME_PAYLOAD_SGL(nvme_pcie_ut_reset_sgl, nvme_pcie_ut_next_sge, &bio, NULL);
637 	req.cmd.opc = SPDK_NVME_OPC_WRITE;
638 	tr.prp_sgl_bus_addr =  0xDAADBEE0;
639 	g_vtophys_size = 4096;
640 
641 	/* Multiple vectors, 2k + 4k + 2k */
642 	req.payload_size = 8192;
643 	bio.iovpos = 3;
644 	bio.iovs[0].iov_base = (void *)0xDBADBEE0;
645 	bio.iovs[0].iov_len = 2048;
646 	bio.iovs[1].iov_base = (void *)0xDCADBEE0;
647 	bio.iovs[1].iov_len = 4096;
648 	bio.iovs[2].iov_base = (void *)0xDDADBEE0;
649 	bio.iovs[2].iov_len = 2048;
650 
651 	rc = nvme_pcie_qpair_build_hw_sgl_request(qpair, &req, &tr, true);
652 	CU_ASSERT(rc == 0);
653 	CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
654 	CU_ASSERT(tr.u.sgl[0].unkeyed.length == 2048);
655 	CU_ASSERT(tr.u.sgl[0].address == 0xDBADBEE0);
656 	CU_ASSERT(tr.u.sgl[0].unkeyed.subtype == 0);
657 	CU_ASSERT(tr.u.sgl[1].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
658 	CU_ASSERT(tr.u.sgl[1].unkeyed.length == 4096);
659 	CU_ASSERT(tr.u.sgl[1].address == 0xDCADBEE0);
660 	CU_ASSERT(tr.u.sgl[2].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
661 	CU_ASSERT(tr.u.sgl[2].unkeyed.length == 2048);
662 	CU_ASSERT(tr.u.sgl[2].unkeyed.length == 2048);
663 	CU_ASSERT(tr.u.sgl[2].address == 0xDDADBEE0);
664 	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
665 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == 0);
666 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
667 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDAADBEE0);
668 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 48);
669 
670 	/* Single vector */
671 	memset(&tr, 0, sizeof(tr));
672 	memset(&bio, 0, sizeof(bio));
673 	memset(&req, 0, sizeof(req));
674 	req.payload = NVME_PAYLOAD_SGL(nvme_pcie_ut_reset_sgl, nvme_pcie_ut_next_sge, &bio, NULL);
675 	req.cmd.opc = SPDK_NVME_OPC_WRITE;
676 	req.payload_size = 4096;
677 	bio.iovpos = 1;
678 	bio.iovs[0].iov_base = (void *)0xDBADBEE0;
679 	bio.iovs[0].iov_len = 4096;
680 
681 	rc = nvme_pcie_qpair_build_hw_sgl_request(qpair, &req, &tr, true);
682 	CU_ASSERT(rc == 0);
683 	CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
684 	CU_ASSERT(tr.u.sgl[0].unkeyed.length == 4096);
685 	CU_ASSERT(tr.u.sgl[0].address == 0xDBADBEE0);
686 	CU_ASSERT(tr.u.sgl[0].unkeyed.subtype == 0);
687 	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
688 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == 0);
689 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
690 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDBADBEE0);
691 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 4096);
692 }
693 
/*
 * Exercise nvme_pcie_qpair_build_contig_request() for contiguous payloads:
 * PRP1-only, PRP1+PRP2 (offset into page), PRP-list, and the -EFAULT path
 * for a non-DWORD-aligned virtual address.
 */
static void
test_nvme_pcie_qpair_build_contig_request(void)
{
	struct nvme_pcie_qpair pqpair = {};
	struct nvme_request req = {};
	struct nvme_tracker tr = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	pqpair.qpair.ctrlr = &ctrlr;
	ctrlr.page_size = 0x1000;

	/* 1 prp, 4k-aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload = NVME_PAYLOAD_CONTIG((void *)0x100000, NULL);
	req.payload_size = 0x1000;

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);

	/* 2 prps, non-4K-aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload = NVME_PAYLOAD_CONTIG((void *)0x100000, NULL);
	req.payload_size = 0x1000;
	req.payload_offset = 0x800;

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);

	/* 3 prps, 4k-aligned: PRP2 becomes a list in the tracker */
	prp_list_prep(&tr, &req, NULL);
	req.payload = NVME_PAYLOAD_CONTIG((void *)0x100000, NULL);
	req.payload_size = 0x3000;

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);

	/* address not dword aligned: the failure path completes the request,
	 * which requires the tracker to be on the qpair's outstanding list */
	prp_list_prep(&tr, &req, NULL);
	req.payload = NVME_PAYLOAD_CONTIG((void *)0x100001, NULL);
	req.payload_size = 0x3000;
	req.qpair = &pqpair.qpair;
	TAILQ_INIT(&pqpair.outstanding_tr);
	TAILQ_INSERT_TAIL(&pqpair.outstanding_tr, &tr, tq_list);

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == -EFAULT);
}
749 
750 static void
751 test_nvme_pcie_ctrlr_regs_get_set(void)
752 {
753 	struct nvme_pcie_ctrlr pctrlr = {};
754 	volatile struct spdk_nvme_registers regs = {};
755 	uint32_t value_4;
756 	uint64_t value_8;
757 	int rc;
758 
759 	pctrlr.regs = &regs;
760 
761 	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, 8, 4);
762 	CU_ASSERT(rc == 0);
763 
764 	rc = nvme_pcie_ctrlr_get_reg_4(&pctrlr.ctrlr, 8, &value_4);
765 	CU_ASSERT(rc == 0);
766 	CU_ASSERT(value_4 == 4);
767 
768 	rc = nvme_pcie_ctrlr_set_reg_8(&pctrlr.ctrlr, 0, 0x100000000);
769 	CU_ASSERT(rc == 0);
770 
771 	rc = nvme_pcie_ctrlr_get_reg_8(&pctrlr.ctrlr, 0, &value_8);
772 	CU_ASSERT(rc == 0);
773 	CU_ASSERT(value_8 == 0x100000000);
774 }
775 
/*
 * Exercise nvme_pcie_ctrlr_map_cmb()/unmap_cmb(): a valid CMBSZ/CMBLOC
 * configuration (512 units of 4K, offset 100 units, SQs unsupported) must
 * map through the fake BAR, and an all-zero configuration must leave the
 * CMB state empty.
 */
static void
test_nvme_pcie_ctrlr_map_unmap_cmb(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	volatile struct spdk_nvme_registers regs = {};
	union spdk_nvme_cmbsz_register cmbsz = {};
	union spdk_nvme_cmbloc_register cmbloc = {};
	struct dev_mem_resource cmd_res = {};
	int rc;

	pctrlr.regs = &regs;
	/* devhandle is cast back to dev_mem_resource by spdk_pci_device_map_bar(). */
	pctrlr.devhandle = (void *)&cmd_res;
	cmd_res.addr = (void *)0x7f7c0080d000;
	cmd_res.len = 0x800000;
	cmd_res.phys_addr = 0xFC800000;
	/* Configure cmb size with unit size 4k, offset 100, unsupported SQ */
	cmbsz.bits.sz = 512;
	cmbsz.bits.szu = 0;
	cmbsz.bits.sqs = 0;
	cmbloc.bits.bir = 0;
	cmbloc.bits.ofst = 100;

	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
				  cmbsz.raw);
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw),
				  cmbloc.raw);

	nvme_pcie_ctrlr_map_cmb(&pctrlr);
	CU_ASSERT(pctrlr.cmb.bar_va == (void *)0x7f7c0080d000);
	CU_ASSERT(pctrlr.cmb.bar_pa == 0xFC800000);
	CU_ASSERT(pctrlr.cmb.size == 512 * 4096);
	CU_ASSERT(pctrlr.cmb.current_offset == 4096 * 100);
	CU_ASSERT(pctrlr.ctrlr.opts.use_cmb_sqs == false);

	rc = nvme_pcie_ctrlr_unmap_cmb(&pctrlr);
	CU_ASSERT(rc == 0);

	/* Invalid mapping information */
	memset(&pctrlr.cmb, 0, sizeof(pctrlr.cmb));
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw), 0);
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw), 0);

	nvme_pcie_ctrlr_map_cmb(&pctrlr);
	CU_ASSERT(pctrlr.cmb.bar_va == NULL);
	CU_ASSERT(pctrlr.cmb.bar_pa == 0);
	CU_ASSERT(pctrlr.cmb.size == 0);
	CU_ASSERT(pctrlr.cmb.current_offset == 0);
	CU_ASSERT(pctrlr.ctrlr.opts.use_cmb_sqs == false);
}
825 
826 
/*
 * Reset a controller to a baseline CMB configuration for the map_io_cmb
 * tests: WDS/RDS enabled in CMBSZ, a 2GB CMB (512 units of 4MB) behind the
 * fake BAR, current offset at 4MB, and CMB SQs disabled. Individual tests
 * then perturb one field at a time.
 */
static void
prepare_map_io_cmd(struct nvme_pcie_ctrlr *pctrlr)
{
	union spdk_nvme_cmbsz_register cmbsz = {};
	union spdk_nvme_cmbloc_register cmbloc = {};

	cmbsz.bits.sz = 512;
	cmbsz.bits.wds = 1;
	cmbsz.bits.rds = 1;

	nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
				  cmbsz.raw);
	nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw),
				  cmbloc.raw);

	pctrlr->cmb.bar_va = (void *)0x7F7C0080D000;
	pctrlr->cmb.bar_pa = 0xFC800000;
	pctrlr->cmb.current_offset = 1ULL << 22;
	pctrlr->cmb.size = (1ULL << 22) * 512;
	pctrlr->cmb.mem_register_addr = NULL;
	pctrlr->ctrlr.opts.use_cmb_sqs = false;
}
849 
/*
 * Exercise nvme_pcie_ctrlr_map_io_cmb() and nvme_pcie_ctrlr_unmap_io_cmb():
 * one success path plus the early-exit conditions (already mapped, no BAR
 * mapping, CMB reserved for submission queues, WDS/RDS unsupported, CMB
 * too small). Expected addresses/sizes follow from the fixed values set
 * in prepare_map_io_cmd().
 */
static void
test_nvme_pcie_ctrlr_map_io_cmb(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	volatile struct spdk_nvme_registers regs = {};
	union spdk_nvme_cmbsz_register cmbsz = {};
	void *mem_reg_addr = NULL;
	size_t size;
	int rc;

	pctrlr.regs = &regs;
	prepare_map_io_cmd(&pctrlr);

	/* Success path: map returns a region past current_offset. */
	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	/* Ceil the current cmb vaddr and cmb size to 2MB_aligned */
	CU_ASSERT(mem_reg_addr == (void *)0x7F7C00E00000);
	CU_ASSERT(size == 0x7FE00000);

	/* Unmap clears the registration bookkeeping. */
	rc = nvme_pcie_ctrlr_unmap_io_cmb(&pctrlr.ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pctrlr.cmb.mem_register_addr == NULL);
	CU_ASSERT(pctrlr.cmb.mem_register_size == 0);

	/* cmb mem_register_addr not NULL: existing registration is returned as-is. */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.cmb.mem_register_addr = (void *)0xDEADBEEF;
	pctrlr.cmb.mem_register_size = 1024;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(size == 1024);
	CU_ASSERT(mem_reg_addr == (void *)0xDEADBEEF);

	/* cmb.bar_va is NULL: CMB was never mapped, so map must fail. */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.cmb.bar_va = NULL;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);

	/* submission queue already used: CMB reserved for SQs cannot back I/O. */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.ctrlr.opts.use_cmb_sqs = true;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);

	pctrlr.ctrlr.opts.use_cmb_sqs = false;

	/* Only SQS is supported: CMBSZ with wds/rds clear (raw 0) rejects I/O mapping. */
	prepare_map_io_cmd(&pctrlr);
	cmbsz.bits.wds = 0;
	cmbsz.bits.rds = 0;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
				  cmbsz.raw);

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);

	/* CMB size is less than 4MB */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.cmb.size = 1ULL << 16;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);
}
919 
/*
 * Exercise nvme_pcie_ctrlr_map_pmr() / nvme_pcie_ctrlr_unmap_pmr() with a
 * fake PCI BAR resource: one valid PMRCAP (bir=2, cmss=1) path that checks
 * the PMRMSC programming and cached mapping fields, then a zeroed PMRCAP
 * that must leave the PMR unmapped.
 */
static void
test_nvme_pcie_ctrlr_map_unmap_pmr(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	volatile struct spdk_nvme_registers regs = {};
	union spdk_nvme_pmrcap_register pmrcap = {};
	struct dev_mem_resource cmd_res = {};
	int rc;

	/* Fake BAR resource the devhandle-based lookup will return. */
	pctrlr.regs = &regs;
	pctrlr.devhandle = (void *)&cmd_res;
	regs.cap.bits.pmrs = 1;
	cmd_res.addr = (void *)0x7F7C0080d000;
	cmd_res.len = 0x800000;
	cmd_res.phys_addr = 0xFC800000;
	pmrcap.bits.bir = 2;
	pmrcap.bits.cmss = 1;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr,
				  offsetof(struct spdk_nvme_registers, pmrcap.raw),
				  pmrcap.raw);

	nvme_pcie_ctrlr_map_pmr(&pctrlr);
	CU_ASSERT(pctrlr.regs->pmrmscu == 0);
	/* Controller memory space enable, bit 1 */
	CU_ASSERT(pctrlr.regs->pmrmscl.raw == 0xFC800002);
	CU_ASSERT(pctrlr.regs->pmrsts.raw == 0);
	CU_ASSERT(pctrlr.pmr.bar_va == (void *)0x7F7C0080d000);
	CU_ASSERT(pctrlr.pmr.bar_pa == 0xFC800000);
	CU_ASSERT(pctrlr.pmr.size == 0x800000);

	/* Unmap must clear the PMRMSC registers programmed above. */
	rc = nvme_pcie_ctrlr_unmap_pmr(&pctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pctrlr.regs->pmrmscu == 0);
	CU_ASSERT(pctrlr.regs->pmrmscl.raw == 0);

	/* pmrcap value invalid */
	memset(&pctrlr, 0, sizeof(pctrlr));
	memset((void *)&regs, 0, sizeof(regs));
	memset(&cmd_res, 0, sizeof(cmd_res));

	pctrlr.regs = &regs;
	pctrlr.devhandle = (void *)&cmd_res;
	regs.cap.bits.pmrs = 1;
	cmd_res.addr = (void *)0x7F7C0080d000;
	cmd_res.len = 0x800000;
	cmd_res.phys_addr = 0xFC800000;
	pmrcap.raw = 0;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr,
				  offsetof(struct spdk_nvme_registers, pmrcap.raw),
				  pmrcap.raw);

	/* With PMRCAP all-zero, no mapping state may be recorded. */
	nvme_pcie_ctrlr_map_pmr(&pctrlr);
	CU_ASSERT(pctrlr.pmr.bar_va == NULL);
	CU_ASSERT(pctrlr.pmr.bar_pa == 0);
	CU_ASSERT(pctrlr.pmr.size == 0);
}
976 
/*
 * Exercise nvme_pcie_ctrlr_config_pmr(): enable the PMR (PMRSTS.NRDY clear),
 * then disable it (PMRSTS.NRDY set while still enabled), then verify a
 * redundant disable is rejected with -EINVAL. PMRCTL.EN is read back after
 * each call to confirm the state transition.
 */
static void
test_nvme_pcie_ctrlr_config_pmr(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	union spdk_nvme_pmrcap_register pmrcap = {};
	union spdk_nvme_pmrsts_register pmrsts = {};
	union spdk_nvme_cap_register	cap = {};
	union spdk_nvme_pmrctl_register pmrctl = {};
	volatile struct spdk_nvme_registers regs = {};
	int rc;

	/* pmrctl enable */
	pctrlr.regs = &regs;
	pmrcap.bits.pmrtu = 0;
	pmrcap.bits.pmrto = 1;
	pmrsts.bits.nrdy = false;
	pmrctl.bits.en = 0;
	cap.bits.pmrs = 1;

	rc = nvme_pcie_ctrlr_set_pmrctl(&pctrlr, &pmrctl);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_reg_8(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
				       cap.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, pmrcap.raw),
				       pmrcap.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, pmrsts.raw),
				       pmrsts.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	rc = nvme_pcie_ctrlr_config_pmr(&pctrlr.ctrlr, true);
	CU_ASSERT(rc == 0);
	rc = nvme_pcie_ctrlr_get_pmrctl(&pctrlr, &pmrctl);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pmrctl.bits.en == true);

	/* pmrctl disable */
	pmrsts.bits.nrdy = true;
	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, pmrsts.raw),
				       pmrsts.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_pmrctl(&pctrlr, &pmrctl);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	rc = nvme_pcie_ctrlr_config_pmr(&pctrlr.ctrlr, false);
	CU_ASSERT(rc == 0);
	rc = nvme_pcie_ctrlr_get_pmrctl(&pctrlr, &pmrctl);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pmrctl.bits.en == false);

	/* configuration exist: disabling an already-disabled PMR must fail. */
	rc = nvme_pcie_ctrlr_config_pmr(&pctrlr.ctrlr, false);
	CU_ASSERT(rc == -EINVAL);
}
1032 
1033 static void
1034 map_io_pmr_init(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_pmrcap_register *pmrcap)
1035 {
1036 	pmrcap->raw = 0;
1037 	pmrcap->bits.rds = 1;
1038 	pmrcap->bits.wds = 1;
1039 	nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, pmrcap.raw),
1040 				  pmrcap->raw);
1041 	pctrlr->regs->cap.bits.pmrs = 1;
1042 	pctrlr->pmr.mem_register_size = 0;
1043 	pctrlr->pmr.mem_register_addr = NULL;
1044 	pctrlr->pmr.bar_va = (void *)0x7F7C00E30000;
1045 	pctrlr->pmr.size = (1 << 22) * 128;
1046 }
1047 
/*
 * Exercise nvme_pcie_ctrlr_map_io_pmr(): every early-exit condition
 * (controller without PMR support, already registered, no BAR mapping,
 * WDS/RDS unsupported, PMR smaller than 4 MiB) followed by the success
 * path. Expected address/size follow from the fixed values set in
 * map_io_pmr_init().
 */
static void
test_nvme_pcie_ctrlr_map_io_pmr(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	struct spdk_nvme_ctrlr *ctrlr;
	volatile struct spdk_nvme_registers regs = {};
	union spdk_nvme_pmrcap_register pmrcap;
	void *mem_reg_addr = NULL;
	size_t rt_size = 0;

	ctrlr = &pctrlr.ctrlr;
	pctrlr.regs = &regs;

	/* PMR is not supported by the controller */
	map_io_pmr_init(&pctrlr, &pmrcap);
	regs.cap.bits.pmrs = 0;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_pmr(ctrlr, &rt_size);
	CU_ASSERT(mem_reg_addr == NULL);

	/* mem_register_addr not NULL: existing registration is returned as-is. */
	map_io_pmr_init(&pctrlr, &pmrcap);
	pctrlr.pmr.mem_register_addr = (void *)0xDEADBEEF;
	pctrlr.pmr.mem_register_size = 1024;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_pmr(ctrlr, &rt_size);
	CU_ASSERT(rt_size == 1024);
	CU_ASSERT(mem_reg_addr == (void *)0xDEADBEEF);

	/* PMR not available */
	map_io_pmr_init(&pctrlr, &pmrcap);
	pctrlr.pmr.bar_va = NULL;
	pctrlr.pmr.mem_register_addr = NULL;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_pmr(ctrlr, &rt_size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(rt_size == 0);

	/* WDS / RDS is not supported */
	map_io_pmr_init(&pctrlr, &pmrcap);
	pmrcap.bits.rds = 0;
	pmrcap.bits.wds = 0;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, pmrcap.raw),
				  pmrcap.raw);

	mem_reg_addr = nvme_pcie_ctrlr_map_io_pmr(ctrlr, &rt_size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(rt_size == 0);

	/* PMR is less than 4MiB in size then abort PMR mapping  */
	map_io_pmr_init(&pctrlr, &pmrcap);
	pctrlr.pmr.size = (1ULL << 20);

	mem_reg_addr = nvme_pcie_ctrlr_map_io_pmr(ctrlr, &rt_size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(rt_size == 0);

	/* All parameters success: expected values are derived from the fixed
	 * bar_va/size in map_io_pmr_init() (2 MiB aligned region). */
	map_io_pmr_init(&pctrlr, &pmrcap);

	mem_reg_addr = nvme_pcie_ctrlr_map_io_pmr(ctrlr, &rt_size);
	CU_ASSERT(mem_reg_addr == (void *)0x7F7C01000000);
	CU_ASSERT(rt_size == 0x1FE00000);
}
1112 
1113 int
1114 main(int argc, char **argv)
1115 {
1116 	CU_pSuite	suite = NULL;
1117 	unsigned int	num_failures;
1118 
1119 	CU_initialize_registry();
1120 
1121 	suite = CU_add_suite("nvme_pcie", NULL, NULL);
1122 	CU_ADD_TEST(suite, test_prp_list_append);
1123 	CU_ADD_TEST(suite, test_nvme_pcie_hotplug_monitor);
1124 	CU_ADD_TEST(suite, test_shadow_doorbell_update);
1125 	CU_ADD_TEST(suite, test_build_contig_hw_sgl_request);
1126 	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_metadata);
1127 	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_prps_sgl_request);
1128 	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_hw_sgl_request);
1129 	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_contig_request);
1130 	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_regs_get_set);
1131 	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_map_unmap_cmb);
1132 	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_map_io_cmb);
1133 	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_map_unmap_pmr);
1134 	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_config_pmr);
1135 	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_map_io_pmr);
1136 
1137 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
1138 	CU_cleanup_registry();
1139 	return num_failures;
1140 }
1141