/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#define UNIT_TEST_NO_VTOPHYS

#include "nvme/nvme_pcie.c"
#include "nvme/nvme_pcie_common.c"
#include "common/lib/nvme/common_stubs.h"

pid_t g_spdk_nvme_pid;
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);

DEFINE_STUB(nvme_get_quirks, uint64_t, (const struct spdk_pci_id *id), 0);

DEFINE_STUB(nvme_wait_for_completion, int,
	    (struct spdk_nvme_qpair *qpair,
	     struct nvme_completion_poll_status *status), 0);
DEFINE_STUB_V(nvme_completion_poll_cb, (void *arg, const struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvme_ctrlr_submit_admin_request, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct nvme_request *req), 0);
DEFINE_STUB_V(nvme_ctrlr_free_processes, (struct spdk_nvme_ctrlr *ctrlr));
DEFINE_STUB(nvme_ctrlr_proc_get_devhandle, struct spdk_pci_device *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);
DEFINE_STUB(spdk_pci_device_unmap_bar, int, (struct spdk_pci_device *dev, uint32_t bar, void *addr),
	    0);
DEFINE_STUB(spdk_pci_device_attach, int, (struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb,
		void *enum_ctx, struct spdk_pci_addr *pci_address), 0);
DEFINE_STUB(spdk_pci_device_claim, int, (struct spdk_pci_device *dev), 0);
DEFINE_STUB_V(spdk_pci_device_unclaim, (struct spdk_pci_device *dev));
DEFINE_STUB_V(spdk_pci_device_detach, (struct spdk_pci_device *device));
DEFINE_STUB(spdk_pci_device_cfg_write16, int, (struct spdk_pci_device *dev, uint16_t value,
		uint32_t offset), 0);
DEFINE_STUB(spdk_pci_device_cfg_read16, int, (struct spdk_pci_device *dev, uint16_t *value,
		uint32_t offset), 0);
DEFINE_STUB(spdk_pci_device_get_id, struct spdk_pci_id, (struct spdk_pci_device *dev), {0});
DEFINE_STUB(spdk_pci_event_listen, int, (void), 0);
DEFINE_STUB(spdk_pci_register_error_handler, int, (spdk_pci_error_handler sighandler, void *ctx),
	    0);
DEFINE_STUB_V(spdk_pci_unregister_error_handler, (spdk_pci_error_handler sighandler));
DEFINE_STUB(spdk_pci_enumerate, int,
	    (struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb, void *enum_ctx),
	    -1);

DEFINE_STUB(nvme_transport_get_name, const char *, (const struct spdk_nvme_transport *transport),
	    NULL);

SPDK_LOG_REGISTER_COMPONENT(nvme)

struct dev_mem_resource {
	uint64_t phys_addr;
	uint64_t len;
	void *addr;
};

struct nvme_pcie_ut_bdev_io {
	struct iovec iovs[NVME_MAX_SGL_DESCRIPTORS];
	int iovpos;
};

struct nvme_driver *g_spdk_nvme_driver = NULL;

int
spdk_pci_device_map_bar(struct spdk_pci_device *dev, uint32_t bar,
			void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
{
	struct dev_mem_resource *dev_mem_res = (void *)dev;

	*mapped_addr = dev_mem_res->addr;
	*phys_addr = dev_mem_res->phys_addr;
	*size = dev_mem_res->len;

	return 0;
}

void
nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
{
	CU_ASSERT(ctrlr != NULL);
	if (hot_remove) {
		ctrlr->is_removed = true;
	}

	ctrlr->is_failed = true;
}

static uint64_t g_vtophys_size = 0;

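/*
 * Identity-mapping vtophys stub: the virtual address doubles as the
 * "physical" address, and *size (the length of the contiguous translation)
 * is reported as g_vtophys_size. Tests override the return value with
 * MOCK_SET(spdk_vtophys, ...) to simulate fixed or failing translations.
 */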
DEFINE_RETURN_MOCK(spdk_vtophys, uint64_t);
uint64_t
spdk_vtophys(const void *buf, uint64_t *size)
{
	if (size) {
		*size = g_vtophys_size;
	}

	HANDLE_RETURN_MOCK(spdk_vtophys);

	return (uintptr_t)buf;
}

DEFINE_STUB(spdk_pci_device_get_addr, struct spdk_pci_addr, (struct spdk_pci_device *dev), {});
DEFINE_STUB(nvme_ctrlr_probe, int, (const struct spdk_nvme_transport_id *trid,
				    struct spdk_nvme_probe_ctx *probe_ctx, void *devhandle), 0);
DEFINE_STUB(spdk_pci_device_is_removed, bool, (struct spdk_pci_device *dev), false);
DEFINE_STUB(nvme_get_ctrlr_by_trid_unsafe, struct spdk_nvme_ctrlr *,
	    (const struct spdk_nvme_transport_id *trid), NULL);
DEFINE_STUB(spdk_nvme_ctrlr_get_regs_csts, union spdk_nvme_csts_register,
	    (struct spdk_nvme_ctrlr *ctrlr), {});
DEFINE_STUB(nvme_ctrlr_get_process, struct spdk_nvme_ctrlr_process *,
	    (struct spdk_nvme_ctrlr *ctrlr, pid_t pid), NULL);
DEFINE_STUB(nvme_completion_is_retry, bool, (const struct spdk_nvme_cpl *cpl), false);
DEFINE_STUB_V(nvme_ctrlr_process_async_event, (struct spdk_nvme_ctrlr *ctrlr,
		const struct spdk_nvme_cpl *cpl));
DEFINE_STUB_V(spdk_nvme_qpair_print_command, (struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_qpair_print_completion, (struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cpl *cpl));

static void
prp_list_prep(struct nvme_tracker *tr, struct nvme_request *req, uint32_t *prp_index)
{
	memset(req, 0, sizeof(*req));
	memset(tr, 0, sizeof(*tr));
	tr->req = req;
	tr->prp_sgl_bus_addr = 0xDEADBEEF;
	if (prp_index) {
		*prp_index = 0;
	}
}

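/*
 * PRP recap (NVMe spec): PRP1 holds the first buffer address and may start at
 * any DWORD-aligned offset within a page; PRP2 holds the second page when the
 * transfer spans exactly two pages, or the bus address of a PRP list (here
 * tr->prp_sgl_bus_addr) when it spans three or more. Every entry after the
 * first must be page-aligned, which is why the "both non-4K aligned" case
 * below fails with -EFAULT.
 */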
static void
test_prp_list_append(void)
{
	struct nvme_request req;
	struct nvme_tracker tr;
	struct spdk_nvme_ctrlr ctrlr = {};
	uint32_t prp_index;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	/* Non-DWORD-aligned buffer (invalid) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100001, 0x1000,
					    0x1000) == -EFAULT);

	/* 512-byte buffer, 4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x200, 0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);

	/* 512-byte buffer, non-4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x108000, 0x200, 0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x108000);

	/* 4K buffer, 4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);

	/* 4K buffer, non-4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);

	/* 8K buffer, 4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x2000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);

	/* 8K buffer, non-4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x2000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 3);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);

	/* 12K buffer, 4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x3000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 3);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);

	/* 12K buffer, non-4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x3000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 4);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);
	CU_ASSERT(tr.u.prp[2] == 0x103000);

	/* Two 4K buffers, both 4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x900000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x900000);

	/* Two 4K buffers, first non-4K aligned, second 4K aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x900000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 3);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x900000);

	/* Two 4K buffers, both non-4K aligned (invalid) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x900800, 0x1000,
					    0x1000) == -EFAULT);
	CU_ASSERT(prp_index == 2);

	/* 4K buffer, 4K aligned, but vtophys fails */
	MOCK_SET(spdk_vtophys, SPDK_VTOPHYS_ERROR);
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x1000,
					    0x1000) == -EFAULT);
	MOCK_CLEAR(spdk_vtophys);

	/* Largest aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000,
					    (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == 0);
	CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);

	/* Largest non-4K-aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800,
					    NVME_MAX_PRP_LIST_ENTRIES * 0x1000, 0x1000) == 0);
	CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);

	/* Buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000,
					    (NVME_MAX_PRP_LIST_ENTRIES + 2) * 0x1000, 0x1000) == -EFAULT);

	/* Non-4K-aligned buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800,
					    (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == -EFAULT);
}

struct spdk_event_entry {
	struct spdk_pci_event		event;
	STAILQ_ENTRY(spdk_event_entry)	link;
};

static STAILQ_HEAD(, spdk_event_entry) g_events = STAILQ_HEAD_INITIALIZER(g_events);
static bool g_device_allowed = false;

int
spdk_pci_get_event(int fd, struct spdk_pci_event *event)
{
	struct spdk_event_entry *entry;

	if (STAILQ_EMPTY(&g_events)) {
		return 0;
	}

	entry = STAILQ_FIRST(&g_events);
	STAILQ_REMOVE_HEAD(&g_events, link);

	*event = entry->event;

	return 1;
}

int
spdk_pci_device_allow(struct spdk_pci_addr *pci_addr)
{
	g_device_allowed = true;

	return 0;
}

static void
test_nvme_pcie_hotplug_monitor(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	struct spdk_event_entry entry = {};
	struct nvme_driver driver;
	pthread_mutexattr_t attr;
	struct spdk_nvme_probe_ctx test_nvme_probe_ctx = {};

	/* Initialize variables and ctrlr */
	driver.initialized = true;
	driver.hotplug_fd = 123;
	CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
	CU_ASSERT(pthread_mutex_init(&pctrlr.ctrlr.ctrlr_lock, &attr) == 0);
	CU_ASSERT(pthread_mutex_init(&driver.lock, &attr) == 0);
	TAILQ_INIT(&driver.shared_attached_ctrlrs);
	g_spdk_nvme_driver = &driver;

	/* Case 1: SPDK_UEVENT_ADD / NVME_VFIO / NVME_UIO */
	entry.event.action = SPDK_UEVENT_ADD;
	spdk_pci_addr_parse(&entry.event.traddr, "0000:05:00.0");
	CU_ASSERT(STAILQ_EMPTY(&g_events));
	STAILQ_INSERT_TAIL(&g_events, &entry, link);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(STAILQ_EMPTY(&g_events));
	CU_ASSERT(g_device_allowed == true);
	g_device_allowed = false;

	/* Case 2: SPDK_UEVENT_REMOVE / NVME_UIO */
	entry.event.action = SPDK_UEVENT_REMOVE;
	spdk_pci_addr_parse(&entry.event.traddr, "0000:05:00.0");
	CU_ASSERT(STAILQ_EMPTY(&g_events));
	STAILQ_INSERT_TAIL(&g_events, &entry, link);

	MOCK_SET(nvme_get_ctrlr_by_trid_unsafe, &pctrlr.ctrlr);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(STAILQ_EMPTY(&g_events));
	CU_ASSERT(pctrlr.ctrlr.is_failed == true);
	CU_ASSERT(pctrlr.ctrlr.is_removed == true);
	pctrlr.ctrlr.is_failed = false;
	pctrlr.ctrlr.is_removed = false;
	MOCK_CLEAR(nvme_get_ctrlr_by_trid_unsafe);

	/* Case 3: SPDK_UEVENT_REMOVE / NVME_VFIO without an event */
	pctrlr.ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	snprintf(pctrlr.ctrlr.trid.traddr, sizeof(pctrlr.ctrlr.trid.traddr), "0000:02:00.0");
	pctrlr.ctrlr.remove_cb = NULL;
	pctrlr.ctrlr.is_failed = false;
	pctrlr.ctrlr.is_removed = false;
	TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &pctrlr.ctrlr, tailq);

	/* This flag would be set by the vfio req notifier callback */
	MOCK_SET(spdk_pci_device_is_removed, true);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(STAILQ_EMPTY(&g_events));
	CU_ASSERT(pctrlr.ctrlr.is_failed == true);
	CU_ASSERT(pctrlr.ctrlr.is_removed == true);
	pctrlr.ctrlr.is_failed = false;
	pctrlr.ctrlr.is_removed = false;
	MOCK_CLEAR(spdk_pci_device_is_removed);

	/* Case 4: Removed device detected in another process */
	MOCK_SET(spdk_pci_device_is_removed, false);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(pctrlr.ctrlr.is_failed == false);

	MOCK_SET(spdk_pci_device_is_removed, true);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(pctrlr.ctrlr.is_failed == true);

	pthread_mutex_destroy(&driver.lock);
	pthread_mutex_destroy(&pctrlr.ctrlr.ctrlr_lock);
	pthread_mutexattr_destroy(&attr);
	g_spdk_nvme_driver = NULL;
}

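/*
 * Shadow doorbell semantics: an MMIO doorbell write is only required when the
 * new index passes the controller-published EventIdx, i.e. when event_idx
 * lies in the half-open interval (old, new_idx]. For (event_idx, new_idx,
 * old) = (10, 15, 14), 10 is outside (14, 15], so no event is needed; for
 * (14, 15, 14), 14 is inside, so it is.
 */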
static void
test_shadow_doorbell_update(void)
{
	bool ret;

	/* nvme_pcie_qpair_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old) */
	ret = nvme_pcie_qpair_need_event(10, 15, 14);
	CU_ASSERT(ret == false);

	ret = nvme_pcie_qpair_need_event(14, 15, 14);
	CU_ASSERT(ret == true);
}

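/*
 * In the tests below, g_vtophys_size bounds how many bytes each
 * spdk_vtophys() call reports as physically contiguous, so a payload larger
 * than that value must be split across multiple SGL descriptors (Test 3).
 */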
static void
test_build_contig_hw_sgl_request(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request req = {};
	struct nvme_tracker tr = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	qpair.ctrlr = &ctrlr;
	/* Test 1: Payload covered by a single mapping */
	req.payload_size = 100;
	req.payload = NVME_PAYLOAD_CONTIG(0, 0);
	g_vtophys_size = 100;
	MOCK_SET(spdk_vtophys, 0xDEADBEEF);

	rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);

	MOCK_CLEAR(spdk_vtophys);
	g_vtophys_size = 0;
	memset(&qpair, 0, sizeof(qpair));
	memset(&req, 0, sizeof(req));
	memset(&tr, 0, sizeof(tr));

	/* Test 2: Payload covered by a single mapping, but request is at an offset */
	qpair.ctrlr = &ctrlr;
	req.payload_size = 100;
	req.payload_offset = 50;
	req.payload = NVME_PAYLOAD_CONTIG(0, 0);
	g_vtophys_size = 1000;
	MOCK_SET(spdk_vtophys, 0xDEADBEEF);

	rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);

	MOCK_CLEAR(spdk_vtophys);
	g_vtophys_size = 0;
	memset(&qpair, 0, sizeof(qpair));
	memset(&req, 0, sizeof(req));
	memset(&tr, 0, sizeof(tr));

	/* Test 3: Payload spans two mappings */
	qpair.ctrlr = &ctrlr;
	req.payload_size = 100;
	req.payload = NVME_PAYLOAD_CONTIG(0, 0);
	g_vtophys_size = 60;
	tr.prp_sgl_bus_addr = 0xFF0FF;
	MOCK_SET(spdk_vtophys, 0xDEADBEEF);

	rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
	CU_ASSERT(req.cmd.dptr.sgl1.address == tr.prp_sgl_bus_addr);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 2 * sizeof(struct spdk_nvme_sgl_descriptor));
	CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[0].unkeyed.length == 60);
	CU_ASSERT(tr.u.sgl[0].address == 0xDEADBEEF);
	CU_ASSERT(tr.u.sgl[1].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[1].unkeyed.length == 40);
	CU_ASSERT(tr.u.sgl[1].address == 0xDEADBEEF);

	MOCK_CLEAR(spdk_vtophys);
	g_vtophys_size = 0;
	memset(&qpair, 0, sizeof(qpair));
	memset(&req, 0, sizeof(req));
	memset(&tr, 0, sizeof(tr));
}

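/*
 * When SGL metadata is used (the first call below), the metadata descriptor
 * is stored at the tail of the tracker's PRP/SGL area, so MPTR must equal
 * prp_sgl_bus_addr minus one descriptor. In the non-SGL case, MPTR is simply
 * the metadata buffer's physical address.
 */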
static void
test_nvme_pcie_qpair_build_metadata(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_tracker tr = {};
	struct nvme_request req = {};
	struct spdk_nvme_ctrlr	ctrlr = {};
	int rc;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	tr.req = &req;
	qpair.ctrlr = &ctrlr;

	req.payload.md = (void *)0xDEADBEE0;
	req.md_offset = 0;
	req.md_size = 4096;
	req.cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	tr.prp_sgl_bus_addr = 0xDBADBEEF;
	MOCK_SET(spdk_vtophys, 0xDCADBEE0);

	rc = nvme_pcie_qpair_build_metadata(&qpair, &tr, true, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_SGL);
	CU_ASSERT(tr.meta_sgl.address == 0xDCADBEE0);
	CU_ASSERT(tr.meta_sgl.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.meta_sgl.unkeyed.length == 4096);
	CU_ASSERT(tr.meta_sgl.unkeyed.subtype == 0);
	CU_ASSERT(req.cmd.mptr == (0xDBADBEEF - sizeof(struct spdk_nvme_sgl_descriptor)));
	MOCK_CLEAR(spdk_vtophys);

	/* Build non-SGL metadata */
	MOCK_SET(spdk_vtophys, 0xDDADBEE0);

	rc = nvme_pcie_qpair_build_metadata(&qpair, &tr, false, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.mptr == 0xDDADBEE0);
	MOCK_CLEAR(spdk_vtophys);
}

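/*
 * Minimal reset_sgl/next_sge callbacks emulating the iterator an SGL payload
 * would register: next_sge hands out one iovec per call, and reset_sgl
 * repositions the cursor to the iovec that starts at the requested offset.
 */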
static int
nvme_pcie_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
{
	struct nvme_pcie_ut_bdev_io *bio = cb_arg;
	struct iovec *iov;

	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_MAX_SGL_DESCRIPTORS);

	iov = &bio->iovs[bio->iovpos];

	*address = iov->iov_base;
	*length = iov->iov_len;
	bio->iovpos++;

	return 0;
}

static void
nvme_pcie_ut_reset_sgl(void *cb_arg, uint32_t offset)
{
	struct nvme_pcie_ut_bdev_io *bio = cb_arg;
	struct iovec *iov;

	for (bio->iovpos = 0; bio->iovpos < NVME_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
		iov = &bio->iovs[bio->iovpos];
		/* Offset must be aligned with the start of an SGL entry */
		if (offset == 0) {
			break;
		}

		SPDK_CU_ASSERT_FATAL(offset >= iov->iov_len);
		offset -= iov->iov_len;
	}

	SPDK_CU_ASSERT_FATAL(offset == 0);
	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_MAX_SGL_DESCRIPTORS);
}

static void
test_nvme_pcie_qpair_build_prps_sgl_request(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request req = {};
	struct nvme_tracker tr = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_pcie_ut_bdev_io bio = {};
	int rc;

	tr.req = &req;
	qpair.ctrlr = &ctrlr;
	req.payload.contig_or_cb_arg = &bio;

	req.payload.reset_sgl_fn = nvme_pcie_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_pcie_ut_next_sge;
	req.payload_size = 4096;
	ctrlr.page_size = 4096;
	bio.iovs[0].iov_base = (void *)0x100000;
	bio.iovs[0].iov_len = 4096;

	rc = nvme_pcie_qpair_build_prps_sgl_request(&qpair, &req, &tr, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
}

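/*
 * Each spdk_nvme_sgl_descriptor is 16 bytes, so the three data blocks in the
 * multi-vector case below form a 48-byte segment referenced by sgl1 as a
 * LAST_SEGMENT descriptor. With a single vector, the lone data-block
 * descriptor fits in sgl1 itself and no segment is needed.
 */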
static void
test_nvme_pcie_qpair_build_hw_sgl_request(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request req = {};
	struct nvme_tracker tr = {};
	struct nvme_pcie_ut_bdev_io bio = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	qpair.ctrlr = &ctrlr;
	req.payload.contig_or_cb_arg = &bio;
	req.payload.reset_sgl_fn = nvme_pcie_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_pcie_ut_next_sge;
	req.cmd.opc = SPDK_NVME_OPC_WRITE;
	tr.prp_sgl_bus_addr = 0xDAADBEE0;
	g_vtophys_size = 4096;

	/* Multiple vectors, 2k + 4k + 2k */
	req.payload_size = 8192;
	bio.iovpos = 3;
	bio.iovs[0].iov_base = (void *)0xDBADBEE0;
	bio.iovs[0].iov_len = 2048;
	bio.iovs[1].iov_base = (void *)0xDCADBEE0;
	bio.iovs[1].iov_len = 4096;
	bio.iovs[2].iov_base = (void *)0xDDADBEE0;
	bio.iovs[2].iov_len = 2048;

	rc = nvme_pcie_qpair_build_hw_sgl_request(&qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[0].unkeyed.length == 2048);
	CU_ASSERT(tr.u.sgl[0].address == 0xDBADBEE0);
	CU_ASSERT(tr.u.sgl[0].unkeyed.subtype == 0);
	CU_ASSERT(tr.u.sgl[1].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[1].unkeyed.length == 4096);
	CU_ASSERT(tr.u.sgl[1].address == 0xDCADBEE0);
	CU_ASSERT(tr.u.sgl[2].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[2].unkeyed.length == 2048);
	CU_ASSERT(tr.u.sgl[2].address == 0xDDADBEE0);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDAADBEE0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 48);

	/* Single vector */
	memset(&tr, 0, sizeof(tr));
	memset(&bio, 0, sizeof(bio));
	memset(&req, 0, sizeof(req));
	req.payload.contig_or_cb_arg = &bio;
	req.payload.reset_sgl_fn = nvme_pcie_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_pcie_ut_next_sge;
	req.cmd.opc = SPDK_NVME_OPC_WRITE;
	req.payload_size = 4096;
	bio.iovpos = 1;
	bio.iovs[0].iov_base = (void *)0xDBADBEE0;
	bio.iovs[0].iov_len = 4096;

	rc = nvme_pcie_qpair_build_hw_sgl_request(&qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[0].unkeyed.length == 4096);
	CU_ASSERT(tr.u.sgl[0].address == 0xDBADBEE0);
	CU_ASSERT(tr.u.sgl[0].unkeyed.subtype == 0);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDBADBEE0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 4096);
}

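/*
 * Contiguous payloads are described with PRPs. The final, non-DWORD-aligned
 * case must fail with -EFAULT; the tracker is queued on outstanding_tr first
 * because the error path completes (aborts) the tracker, which removes it
 * from that list.
 */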
static void
test_nvme_pcie_qpair_build_contig_request(void)
{
	struct nvme_pcie_qpair pqpair = {};
	struct nvme_request req = {};
	struct nvme_tracker tr = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	pqpair.qpair.ctrlr = &ctrlr;
	ctrlr.page_size = 0x1000;

	/* 1 PRP, 4K-aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload_size = 0x1000;
	req.payload.contig_or_cb_arg = (void *)0x100000;

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);

	/* 2 PRPs, non-4K-aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload_size = 0x1000;
	req.payload_offset = 0x800;
	req.payload.contig_or_cb_arg = (void *)0x100000;

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);

	/* 3 PRPs, 4K-aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload_size = 0x3000;
	req.payload.contig_or_cb_arg = (void *)0x100000;

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);

	/* Address not DWORD-aligned (invalid) */
	prp_list_prep(&tr, &req, NULL);
	req.payload_size = 0x3000;
	req.payload.contig_or_cb_arg = (void *)0x100001;
	req.qpair = &pqpair.qpair;
	TAILQ_INIT(&pqpair.outstanding_tr);
	TAILQ_INSERT_TAIL(&pqpair.outstanding_tr, &tr, tq_list);

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == -EFAULT);
}

static void
test_nvme_pcie_ctrlr_regs_get_set(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	volatile struct spdk_nvme_registers regs = {};
	uint32_t value_4;
	uint64_t value_8;
	int rc;

	pctrlr.regs = &regs;

	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, 8, 4);
	CU_ASSERT(rc == 0);

	rc = nvme_pcie_ctrlr_get_reg_4(&pctrlr.ctrlr, 8, &value_4);
	CU_ASSERT(rc == 0);
	CU_ASSERT(value_4 == 4);

	rc = nvme_pcie_ctrlr_set_reg_8(&pctrlr.ctrlr, 0, 0x100000000);
	CU_ASSERT(rc == 0);

	rc = nvme_pcie_ctrlr_get_reg_8(&pctrlr.ctrlr, 0, &value_8);
	CU_ASSERT(rc == 0);
	CU_ASSERT(value_8 == 0x100000000);
}

static void
test_nvme_pcie_ctrlr_map_unmap_cmb(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	volatile struct spdk_nvme_registers regs = {};
	union spdk_nvme_cmbsz_register cmbsz = {};
	union spdk_nvme_cmbloc_register cmbloc = {};
	struct dev_mem_resource cmd_res = {};
	int rc;

	pctrlr.regs = &regs;
	pctrlr.devhandle = (void *)&cmd_res;
	cmd_res.addr = (void *)0x7F7C0080D000;
	cmd_res.len = 0x800000;
	cmd_res.phys_addr = 0xFC800000;
	/* Configure the CMB: szu = 0 selects 4 KiB units, so sz = 512 gives a
	 * 2 MiB CMB at an offset of 100 units; SQs in the CMB are unsupported. */
	cmbsz.bits.sz = 512;
	cmbsz.bits.szu = 0;
	cmbsz.bits.sqs = 0;
	cmbloc.bits.bir = 0;
	cmbloc.bits.ofst = 100;

	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
				  cmbsz.raw);
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw),
				  cmbloc.raw);

	nvme_pcie_ctrlr_map_cmb(&pctrlr);
	CU_ASSERT(pctrlr.cmb.bar_va == (void *)0x7F7C0080D000);
	CU_ASSERT(pctrlr.cmb.bar_pa == 0xFC800000);
	CU_ASSERT(pctrlr.cmb.size == 512 * 4096);
	CU_ASSERT(pctrlr.cmb.current_offset == 4096 * 100);
	CU_ASSERT(pctrlr.ctrlr.opts.use_cmb_sqs == false);

	rc = nvme_pcie_ctrlr_unmap_cmb(&pctrlr);
	CU_ASSERT(rc == 0);

	/* Invalid mapping information */
	memset(&pctrlr.cmb, 0, sizeof(pctrlr.cmb));
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw), 0);
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw), 0);

	nvme_pcie_ctrlr_map_cmb(&pctrlr);
	CU_ASSERT(pctrlr.cmb.bar_va == NULL);
	CU_ASSERT(pctrlr.cmb.bar_pa == 0);
	CU_ASSERT(pctrlr.cmb.size == 0);
	CU_ASSERT(pctrlr.cmb.current_offset == 0);
	CU_ASSERT(pctrlr.ctrlr.opts.use_cmb_sqs == false);
}

static void
prepare_map_io_cmd(struct nvme_pcie_ctrlr *pctrlr)
{
	union spdk_nvme_cmbsz_register cmbsz = {};
	union spdk_nvme_cmbloc_register cmbloc = {};

	cmbsz.bits.sz = 512;
	cmbsz.bits.wds = 1;
	cmbsz.bits.rds = 1;

	nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
				  cmbsz.raw);
	nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw),
				  cmbloc.raw);

	pctrlr->cmb.bar_va = (void *)0x7F7C0080D000;
	pctrlr->cmb.bar_pa = 0xFC800000;
	pctrlr->cmb.current_offset = 1ULL << 22;
	pctrlr->cmb.size = (1ULL << 22) * 512;
	pctrlr->cmb.mem_register_addr = NULL;
	pctrlr->ctrlr.opts.use_cmb_sqs = false;
}

static void
test_nvme_pcie_ctrlr_map_io_cmb(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	volatile struct spdk_nvme_registers regs = {};
	union spdk_nvme_cmbsz_register cmbsz = {};
	void *mem_reg_addr = NULL;
	size_t size;
	int rc;

	pctrlr.regs = &regs;
	prepare_map_io_cmd(&pctrlr);

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	/* The registered region is the CMB free space rounded inward to 2 MiB
	 * boundaries: the start (bar_va + current_offset = 0x7F7C00C0D000)
	 * rounds up to 0x7F7C00E00000 and the end rounds down, leaving
	 * 0x7FE00000 bytes. */
	CU_ASSERT(mem_reg_addr == (void *)0x7F7C00E00000);
	CU_ASSERT(size == 0x7FE00000);

	rc = nvme_pcie_ctrlr_unmap_io_cmb(&pctrlr.ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pctrlr.cmb.mem_register_addr == NULL);
	CU_ASSERT(pctrlr.cmb.mem_register_size == 0);

	/* cmb.mem_register_addr already set */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.cmb.mem_register_addr = (void *)0xDEADBEEF;
	pctrlr.cmb.mem_register_size = 1024;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(size == 1024);
	CU_ASSERT(mem_reg_addr == (void *)0xDEADBEEF);

	/* cmb.bar_va is NULL */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.cmb.bar_va = NULL;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);

	/* CMB already used for submission queues */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.ctrlr.opts.use_cmb_sqs = true;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);

	pctrlr.ctrlr.opts.use_cmb_sqs = false;

	/* WDS and RDS cleared: CMB data transfer unsupported */
	prepare_map_io_cmd(&pctrlr);
	cmbsz.bits.wds = 0;
	cmbsz.bits.rds = 0;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
				  cmbsz.raw);

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);

	/* CMB size is less than 4 MiB */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.cmb.size = 1ULL << 16;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);
}

static void
test_nvme_pcie_ctrlr_map_unmap_pmr(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	volatile struct spdk_nvme_registers regs = {};
	union spdk_nvme_pmrcap_register pmrcap = {};
	struct dev_mem_resource cmd_res = {};
	int rc;

	pctrlr.regs = &regs;
	pctrlr.devhandle = (void *)&cmd_res;
	regs.cap.bits.pmrs = 1;
	cmd_res.addr = (void *)0x7F7C0080D000;
	cmd_res.len = 0x800000;
	cmd_res.phys_addr = 0xFC800000;
	pmrcap.bits.bir = 2;
	pmrcap.bits.cmss = 1;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr,
				  offsetof(struct spdk_nvme_registers, pmrcap.raw),
				  pmrcap.raw);

	nvme_pcie_ctrlr_map_pmr(&pctrlr);
	CU_ASSERT(pctrlr.regs->pmrmscu == 0);
	/* PMRMSCL = BAR physical address (0xFC800000) | CMSE (bit 1) */
	CU_ASSERT(pctrlr.regs->pmrmscl.raw == 0xFC800002);
	CU_ASSERT(pctrlr.regs->pmrsts.raw == 0);
	CU_ASSERT(pctrlr.pmr.bar_va == (void *)0x7F7C0080D000);
	CU_ASSERT(pctrlr.pmr.bar_pa == 0xFC800000);
	CU_ASSERT(pctrlr.pmr.size == 0x800000);

	rc = nvme_pcie_ctrlr_unmap_pmr(&pctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pctrlr.regs->pmrmscu == 0);
	CU_ASSERT(pctrlr.regs->pmrmscl.raw == 0);

	/* Invalid pmrcap value */
	memset(&pctrlr, 0, sizeof(pctrlr));
	memset((void *)&regs, 0, sizeof(regs));
	memset(&cmd_res, 0, sizeof(cmd_res));

	pctrlr.regs = &regs;
	pctrlr.devhandle = (void *)&cmd_res;
	regs.cap.bits.pmrs = 1;
	cmd_res.addr = (void *)0x7F7C0080D000;
	cmd_res.len = 0x800000;
	cmd_res.phys_addr = 0xFC800000;
	pmrcap.raw = 0;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr,
				  offsetof(struct spdk_nvme_registers, pmrcap.raw),
				  pmrcap.raw);

	nvme_pcie_ctrlr_map_pmr(&pctrlr);
	CU_ASSERT(pctrlr.pmr.bar_va == NULL);
	CU_ASSERT(pctrlr.pmr.bar_pa == 0);
	CU_ASSERT(pctrlr.pmr.size == 0);
}

static void
test_nvme_pcie_ctrlr_config_pmr(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	union spdk_nvme_pmrcap_register pmrcap = {};
	union spdk_nvme_pmrsts_register pmrsts = {};
	union spdk_nvme_cap_register	cap = {};
	union spdk_nvme_pmrctl_register pmrctl = {};
	volatile struct spdk_nvme_registers regs = {};
	int rc;

	/* Enable the PMR */
	pctrlr.regs = &regs;
	pmrcap.bits.pmrtu = 0;
	pmrcap.bits.pmrto = 1;
	pmrsts.bits.nrdy = false;
	pmrctl.bits.en = 0;
	cap.bits.pmrs = 1;

	rc = nvme_pcie_ctrlr_set_pmrctl(&pctrlr, &pmrctl);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_reg_8(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
				       cap.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, pmrcap.raw),
				       pmrcap.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, pmrsts.raw),
				       pmrsts.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	rc = nvme_pcie_ctrlr_config_pmr(&pctrlr.ctrlr, true);
	CU_ASSERT(rc == 0);
	rc = nvme_pcie_ctrlr_get_pmrctl(&pctrlr, &pmrctl);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pmrctl.bits.en == true);

	/* Disable the PMR */
	pmrsts.bits.nrdy = true;
	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, pmrsts.raw),
				       pmrsts.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_pmrctl(&pctrlr, &pmrctl);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	rc = nvme_pcie_ctrlr_config_pmr(&pctrlr.ctrlr, false);
	CU_ASSERT(rc == 0);
	rc = nvme_pcie_ctrlr_get_pmrctl(&pctrlr, &pmrctl);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pmrctl.bits.en == false);

	/* Disabling an already-disabled PMR fails */
	rc = nvme_pcie_ctrlr_config_pmr(&pctrlr.ctrlr, false);
	CU_ASSERT(rc == -EINVAL);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvme_pcie", NULL, NULL);
	CU_ADD_TEST(suite, test_prp_list_append);
	CU_ADD_TEST(suite, test_nvme_pcie_hotplug_monitor);
	CU_ADD_TEST(suite, test_shadow_doorbell_update);
	CU_ADD_TEST(suite, test_build_contig_hw_sgl_request);
	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_metadata);
	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_prps_sgl_request);
	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_hw_sgl_request);
	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_contig_request);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_regs_get_set);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_map_unmap_cmb);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_map_io_cmb);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_map_unmap_pmr);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_config_pmr);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}