/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk_internal/cunit.h"

#define UNIT_TEST_NO_VTOPHYS

#include "nvme/nvme_pcie.c"
#include "nvme/nvme_pcie_common.c"
#include "common/lib/nvme/common_stubs.h"

pid_t g_spdk_nvme_pid;
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);

DEFINE_STUB(nvme_get_quirks, uint64_t, (const struct spdk_pci_id *id), 0);

DEFINE_STUB(nvme_wait_for_completion, int,
	    (struct spdk_nvme_qpair *qpair,
	     struct nvme_completion_poll_status *status), 0);
DEFINE_STUB_V(nvme_completion_poll_cb, (void *arg, const struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvme_ctrlr_submit_admin_request, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct nvme_request *req), 0);

DEFINE_STUB(nvme_ctrlr_proc_get_devhandle, struct spdk_pci_device *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);
DEFINE_STUB(spdk_pci_device_unmap_bar, int, (struct spdk_pci_device *dev, uint32_t bar, void *addr),
	    0);
DEFINE_STUB(spdk_pci_device_attach, int, (struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb,
		void *enum_ctx, struct spdk_pci_addr *pci_address), 0);
DEFINE_STUB(spdk_pci_device_claim, int, (struct spdk_pci_device *dev), 0);
DEFINE_STUB_V(spdk_pci_device_unclaim, (struct spdk_pci_device *dev));
DEFINE_STUB_V(spdk_pci_device_detach, (struct spdk_pci_device *device));
DEFINE_STUB(spdk_pci_device_cfg_write16, int, (struct spdk_pci_device *dev, uint16_t value,
		uint32_t offset), 0);
DEFINE_STUB(spdk_pci_device_cfg_read16, int, (struct spdk_pci_device *dev, uint16_t *value,
		uint32_t offset), 0);
DEFINE_STUB(spdk_pci_device_get_id, struct spdk_pci_id, (struct spdk_pci_device *dev), {0});
DEFINE_STUB(spdk_pci_event_listen, int, (void), 0);
DEFINE_STUB(spdk_pci_register_error_handler, int, (spdk_pci_error_handler sighandler, void *ctx),
	    0);
DEFINE_STUB_V(spdk_pci_unregister_error_handler, (spdk_pci_error_handler sighandler));
DEFINE_STUB(spdk_pci_enumerate, int,
	    (struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb, void *enum_ctx),
	    -1);

SPDK_LOG_REGISTER_COMPONENT(nvme)

struct dev_mem_resource {
	uint64_t phys_addr;
	uint64_t len;
	void *addr;
};

struct nvme_pcie_ut_bdev_io {
	struct iovec iovs[NVME_MAX_SGL_DESCRIPTORS];
	int iovpos;
};

struct nvme_driver *g_spdk_nvme_driver = NULL;

int
spdk_pci_device_map_bar(struct spdk_pci_device *dev, uint32_t bar,
			void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
{
	struct dev_mem_resource *dev_mem_res = (void *)dev;

	*mapped_addr = dev_mem_res->addr;
	*phys_addr = dev_mem_res->phys_addr;
	*size = dev_mem_res->len;

	return 0;
}

void
nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
{
	CU_ASSERT(ctrlr != NULL);
	if (hot_remove) {
		ctrlr->is_removed = true;
	}

	ctrlr->is_failed = true;
}

static uint64_t g_vtophys_size = 0;

DEFINE_RETURN_MOCK(spdk_vtophys, uint64_t);
uint64_t
spdk_vtophys(const void *buf, uint64_t *size)
{
	if (size && g_vtophys_size > 0) {
		*size = g_vtophys_size;
	}

	HANDLE_RETURN_MOCK(spdk_vtophys);

	return (uintptr_t)buf;
}

DEFINE_STUB(spdk_pci_device_get_addr, struct spdk_pci_addr, (struct spdk_pci_device *dev), {});
DEFINE_STUB(nvme_ctrlr_probe, int, (const struct spdk_nvme_transport_id *trid,
				    struct spdk_nvme_probe_ctx *probe_ctx, void *devhandle), 0);
DEFINE_STUB(spdk_pci_device_is_removed, bool, (struct spdk_pci_device *dev), false);
DEFINE_STUB(nvme_get_ctrlr_by_trid_unsafe, struct spdk_nvme_ctrlr *,
	    (const struct spdk_nvme_transport_id *trid), NULL);
DEFINE_STUB(spdk_nvme_ctrlr_get_regs_csts, union spdk_nvme_csts_register,
	    (struct spdk_nvme_ctrlr *ctrlr), {});
DEFINE_STUB(nvme_ctrlr_get_process, struct spdk_nvme_ctrlr_process *,
	    (struct spdk_nvme_ctrlr *ctrlr, pid_t pid), NULL);
DEFINE_STUB(nvme_completion_is_retry, bool, (const struct spdk_nvme_cpl *cpl), false);
DEFINE_STUB_V(nvme_ctrlr_process_async_event, (struct spdk_nvme_ctrlr *ctrlr,
		const struct spdk_nvme_cpl *cpl));
DEFINE_STUB_V(spdk_nvme_qpair_print_command, (struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_qpair_print_completion, (struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cpl *cpl));

static void
prp_list_prep(struct nvme_tracker *tr, struct nvme_request *req, uint32_t *prp_index)
{
	memset(req, 0, sizeof(*req));
	memset(tr, 0, sizeof(*tr));
	tr->req = req;
	tr->prp_sgl_bus_addr = 0xDEADBEEF;
	if (prp_index) {
		*prp_index = 0;
	}
}

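/*
 * PRP rules exercised by the test below (per the NVMe spec): PRP1 may point
 * anywhere within a page as long as it is dword-aligned; PRP2 holds the
 * second page address when the payload spans exactly two pages, or the bus
 * address of a PRP list when it spans three or more. prp_index counts the
 * PRP entries consumed so far, including PRP1.
 */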
static void
test_prp_list_append(void)
{
	struct nvme_request req;
	struct nvme_tracker tr;
	struct spdk_nvme_ctrlr ctrlr = {};
	uint32_t prp_index;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	/* Non-DWORD-aligned buffer (invalid) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100001, 0x1000,
					    0x1000) == -EFAULT);

	/* 512-byte buffer, 4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x200, 0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);

	/* 512-byte buffer, non-4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x108000, 0x200, 0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x108000);

	/* 4K buffer, 4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);

	/* 4K buffer, non-4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);

	/* 8K buffer, 4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x2000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);

	/* 8K buffer, non-4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x2000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 3);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);

	/* 12K buffer, 4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x3000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 3);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);

	/* 12K buffer, non-4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x3000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 4);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);
	CU_ASSERT(tr.u.prp[2] == 0x103000);

	/* Two 4K buffers, both 4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x900000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x900000);

	/* Two 4K buffers, first non-4K-aligned, second 4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x900000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 3);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x900000);

	/* Two 4K buffers, both non-4K-aligned (invalid) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x900800, 0x1000,
					    0x1000) == -EFAULT);
	CU_ASSERT(prp_index == 2);

	/* 4K buffer, 4K-aligned, but vtophys fails */
	MOCK_SET(spdk_vtophys, SPDK_VTOPHYS_ERROR);
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x1000,
					    0x1000) == -EFAULT);
	MOCK_CLEAR(spdk_vtophys);

	/* Largest aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000,
					    (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == 0);
	CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);

	/* Largest non-4K-aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800,
					    NVME_MAX_PRP_LIST_ENTRIES * 0x1000, 0x1000) == 0);
	CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);

	/* Buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000,
					    (NVME_MAX_PRP_LIST_ENTRIES + 2) * 0x1000, 0x1000) == -EFAULT);

	/* Non-4K-aligned buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800,
					    (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == -EFAULT);
}

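/*
 * Minimal stand-in for the kernel uevent channel: spdk_pci_get_event() below
 * pops entries from this queue, so the tests can drive the hotplug monitor
 * synchronously with synthetic events.
 */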
struct spdk_event_entry {
	struct spdk_pci_event		event;
	STAILQ_ENTRY(spdk_event_entry)	link;
};

static STAILQ_HEAD(, spdk_event_entry) g_events = STAILQ_HEAD_INITIALIZER(g_events);
static bool g_device_allowed = false;

int
spdk_pci_get_event(int fd, struct spdk_pci_event *event)
{
	struct spdk_event_entry *entry;

	if (STAILQ_EMPTY(&g_events)) {
		return 0;
	}

	entry = STAILQ_FIRST(&g_events);
	STAILQ_REMOVE_HEAD(&g_events, link);

	*event = entry->event;

	return 1;
}

int
spdk_pci_device_allow(struct spdk_pci_addr *pci_addr)
{
	g_device_allowed = true;

	return 0;
}

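/*
 * Drives _nvme_pcie_hotplug_monitor() with synthetic add/remove events and
 * verifies that devices are allowed on attach and failed/removed on detach.
 */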
static void
test_nvme_pcie_hotplug_monitor(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	struct spdk_event_entry entry = {};
	struct nvme_driver driver;
	pthread_mutexattr_t attr;
	struct spdk_nvme_probe_ctx test_nvme_probe_ctx = {};

	/* Initialize variables and ctrlr */
	driver.initialized = true;
	driver.hotplug_fd = 123;
	CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
	CU_ASSERT(pthread_mutex_init(&pctrlr.ctrlr.ctrlr_lock, &attr) == 0);
	CU_ASSERT(pthread_mutex_init(&driver.lock, &attr) == 0);
	TAILQ_INIT(&driver.shared_attached_ctrlrs);
	g_spdk_nvme_driver = &driver;

	/* Case 1: SPDK_NVME_UEVENT_ADD / NVME_VFIO / NVME_UIO */
	entry.event.action = SPDK_UEVENT_ADD;
	spdk_pci_addr_parse(&entry.event.traddr, "0000:05:00.0");
	CU_ASSERT(STAILQ_EMPTY(&g_events));
	STAILQ_INSERT_TAIL(&g_events, &entry, link);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(STAILQ_EMPTY(&g_events));
	CU_ASSERT(g_device_allowed == true);
	g_device_allowed = false;

	/* Case 2: SPDK_NVME_UEVENT_REMOVE / NVME_UIO */
	entry.event.action = SPDK_UEVENT_REMOVE;
	spdk_pci_addr_parse(&entry.event.traddr, "0000:05:00.0");
	CU_ASSERT(STAILQ_EMPTY(&g_events));
	STAILQ_INSERT_TAIL(&g_events, &entry, link);

	MOCK_SET(nvme_get_ctrlr_by_trid_unsafe, &pctrlr.ctrlr);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(STAILQ_EMPTY(&g_events));
	CU_ASSERT(pctrlr.ctrlr.is_failed == true);
	CU_ASSERT(pctrlr.ctrlr.is_removed == true);
	pctrlr.ctrlr.is_failed = false;
	pctrlr.ctrlr.is_removed = false;
	MOCK_CLEAR(nvme_get_ctrlr_by_trid_unsafe);

	/* Case 3: SPDK_NVME_UEVENT_REMOVE / NVME_VFIO without event */
	pctrlr.ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	snprintf(pctrlr.ctrlr.trid.traddr, sizeof(pctrlr.ctrlr.trid.traddr), "0000:02:00.0");
	pctrlr.ctrlr.remove_cb = NULL;
	pctrlr.ctrlr.is_failed = false;
	pctrlr.ctrlr.is_removed = false;
	TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &pctrlr.ctrlr, tailq);

	/* This should be set in the vfio req notifier cb */
	MOCK_SET(spdk_pci_device_is_removed, true);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(STAILQ_EMPTY(&g_events));
	CU_ASSERT(pctrlr.ctrlr.is_failed == true);
	CU_ASSERT(pctrlr.ctrlr.is_removed == true);
	pctrlr.ctrlr.is_failed = false;
	pctrlr.ctrlr.is_removed = false;
	MOCK_CLEAR(spdk_pci_device_is_removed);

	/* Case 4: Removed device detected in another process */
	MOCK_SET(spdk_pci_device_is_removed, false);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(pctrlr.ctrlr.is_failed == false);

	MOCK_SET(spdk_pci_device_is_removed, true);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(pctrlr.ctrlr.is_failed == true);

	pthread_mutex_destroy(&driver.lock);
	pthread_mutex_destroy(&pctrlr.ctrlr.ctrlr_lock);
	pthread_mutexattr_destroy(&attr);
	g_spdk_nvme_driver = NULL;
}

static void
test_shadow_doorbell_update(void)
{
	bool ret;

	/* nvme_pcie_qpair_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old) */
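	/* Per the shadow doorbell convention, an event is needed when
	 * event_idx falls in [old, new_idx) modulo 2^16, i.e. this update
	 * stepped the doorbell across the event index.
	 */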
	ret = nvme_pcie_qpair_need_event(10, 15, 14);
	CU_ASSERT(ret == false);

	ret = nvme_pcie_qpair_need_event(14, 15, 14);
	CU_ASSERT(ret == true);
}

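/*
 * Contiguous payloads that map to a single physical region are described
 * inline in sgl1 as one data block; payloads spanning multiple physical
 * mappings get a descriptor list in tr.u.sgl, with sgl1 pointing at it as a
 * last-segment descriptor. The cases below cover both paths.
 */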
static void
test_build_contig_hw_sgl_request(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request req = {};
	struct nvme_tracker tr = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	qpair.ctrlr = &ctrlr;
	/* Test 1: Payload covered by a single mapping */
	req.payload_size = 100;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xbeef0, NULL);
	g_vtophys_size = 100;
	MOCK_SET(spdk_vtophys, 0xDEADBEEF);

	rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);

	MOCK_CLEAR(spdk_vtophys);
	g_vtophys_size = 0;
	memset(&qpair, 0, sizeof(qpair));
	memset(&req, 0, sizeof(req));
	memset(&tr, 0, sizeof(tr));

	/* Test 2: Payload covered by a single mapping, but request is at an offset */
	qpair.ctrlr = &ctrlr;
	req.payload_size = 100;
	req.payload_offset = 50;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xbeef0, NULL);
	g_vtophys_size = 1000;
	MOCK_SET(spdk_vtophys, 0xDEADBEEF);

	rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);

	MOCK_CLEAR(spdk_vtophys);
	g_vtophys_size = 0;
	memset(&qpair, 0, sizeof(qpair));
	memset(&req, 0, sizeof(req));
	memset(&tr, 0, sizeof(tr));

	/* Test 3: Payload spans two mappings */
	qpair.ctrlr = &ctrlr;
	req.payload_size = 100;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xbeef0, NULL);
	g_vtophys_size = 60;
	tr.prp_sgl_bus_addr = 0xFF0FF;
	MOCK_SET(spdk_vtophys, 0xDEADBEEF);

	rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
	CU_ASSERT(req.cmd.dptr.sgl1.address == tr.prp_sgl_bus_addr);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 2 * sizeof(struct spdk_nvme_sgl_descriptor));
	CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[0].unkeyed.length == 60);
	CU_ASSERT(tr.u.sgl[0].address == 0xDEADBEEF);
	CU_ASSERT(tr.u.sgl[1].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[1].unkeyed.length == 40);
	CU_ASSERT(tr.u.sgl[1].address == 0xDEADBEEF);

	MOCK_CLEAR(spdk_vtophys);
	g_vtophys_size = 0;
	memset(&qpair, 0, sizeof(qpair));
	memset(&req, 0, sizeof(req));
	memset(&tr, 0, sizeof(tr));
}

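/*
 * Metadata travels either through MPTR directly (contiguous case) or, when
 * metadata SGLs are used, through a descriptor placed just below
 * prp_sgl_bus_addr with cmd.psdt switched to SPDK_NVME_PSDT_SGL_MPTR_SGL;
 * the asserts below check both variants.
 */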
static void
test_nvme_pcie_qpair_build_metadata(void)
{
	struct nvme_pcie_qpair pqpair = {};
	struct spdk_nvme_qpair *qpair = &pqpair.qpair;
	struct nvme_tracker tr = {};
	struct nvme_request req = {};
	struct spdk_nvme_ctrlr	ctrlr = {};
	int rc;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	tr.req = &req;
	qpair->ctrlr = &ctrlr;

	req.payload = NVME_PAYLOAD_CONTIG(NULL, (void *)0xDEADBEE0);
	req.md_offset = 0;
	req.md_size = 4096;
	/* nvme_pcie_qpair_build_metadata() expects cmd.psdt to be set to
	 * SPDK_NVME_PSDT_SGL_MPTR_CONTIG; if the metadata is built using an
	 * SGL, the function changes cmd.psdt to SPDK_NVME_PSDT_SGL_MPTR_SGL.
	 * Verify that this is indeed the case.
	 */
	req.cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	tr.prp_sgl_bus_addr = 0xDBADBEEF;
	MOCK_SET(spdk_vtophys, 0xDCADBEE0);

	rc = nvme_pcie_qpair_build_metadata(qpair, &tr, true, true, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_SGL);
	CU_ASSERT(tr.meta_sgl.address == 0xDCADBEE0);
	CU_ASSERT(tr.meta_sgl.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.meta_sgl.unkeyed.length == 4096);
	CU_ASSERT(tr.meta_sgl.unkeyed.subtype == 0);
	CU_ASSERT(req.cmd.mptr == (0xDBADBEEF - sizeof(struct spdk_nvme_sgl_descriptor)));

	/* Non-IOVA-contiguous metadata buffers should fail. */
	g_vtophys_size = 1024;
	req.cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	rc = nvme_pcie_qpair_build_metadata(qpair, &tr, true, true, true);
	CU_ASSERT(rc == -EINVAL);
	g_vtophys_size = 0;

	MOCK_CLEAR(spdk_vtophys);

	/* Build non-SGL metadata */
	MOCK_SET(spdk_vtophys, 0xDDADBEE0);

	rc = nvme_pcie_qpair_build_metadata(qpair, &tr, false, false, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.mptr == 0xDDADBEE0);

	/* Build non-SGL metadata while SGLs are supported */
	memset(&tr.meta_sgl, 0, sizeof(tr.meta_sgl));
	/* If SGLs are supported, but not used for the metadata, cmd.psdt
	 * shall not be changed to SPDK_NVME_PSDT_SGL_MPTR_SGL
	 */
	req.cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	rc = nvme_pcie_qpair_build_metadata(qpair, &tr, true, false, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tr.meta_sgl.address == 0);
	CU_ASSERT(tr.meta_sgl.unkeyed.length == 0);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
	CU_ASSERT(req.cmd.mptr == 0xDDADBEE0);

	/* Non-IOVA-contiguous metadata buffers should fail. */
	g_vtophys_size = 1024;
	rc = nvme_pcie_qpair_build_metadata(qpair, &tr, false, false, true);
	CU_ASSERT(rc == -EINVAL);
	g_vtophys_size = 0;

	MOCK_CLEAR(spdk_vtophys);
}

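/*
 * reset_sgl/next_sge mimic the callback pair a bdev-style consumer would
 * register: reset positions the iterator at a payload offset, next_sge hands
 * out one iovec at a time.
 */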
static int
nvme_pcie_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
{
	struct nvme_pcie_ut_bdev_io *bio = cb_arg;
	struct iovec *iov;

	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_MAX_SGL_DESCRIPTORS);

	iov = &bio->iovs[bio->iovpos];

	*address = iov->iov_base;
	*length = iov->iov_len;
	bio->iovpos++;

	return 0;
}

static void
nvme_pcie_ut_reset_sgl(void *cb_arg, uint32_t offset)
{
	struct nvme_pcie_ut_bdev_io *bio = cb_arg;
	struct iovec *iov;

	for (bio->iovpos = 0; bio->iovpos < NVME_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
		iov = &bio->iovs[bio->iovpos];
		/* Offset must be aligned with the start of any SGL entry */
		if (offset == 0) {
			break;
		}

		SPDK_CU_ASSERT_FATAL(offset >= iov->iov_len);
		offset -= iov->iov_len;
	}

	SPDK_CU_ASSERT_FATAL(offset == 0);
	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_MAX_SGL_DESCRIPTORS);
}

static void
test_nvme_pcie_qpair_build_prps_sgl_request(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request req = {};
	struct nvme_tracker tr = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_pcie_ut_bdev_io bio = {};
	int rc;

	tr.req = &req;
	qpair.ctrlr = &ctrlr;
	req.payload = NVME_PAYLOAD_SGL(nvme_pcie_ut_reset_sgl, nvme_pcie_ut_next_sge, &bio, NULL);
	req.payload_size = 4096;
	ctrlr.page_size = 4096;
	bio.iovs[0].iov_base = (void *)0x100000;
	bio.iovs[0].iov_len = 4096;

	rc = nvme_pcie_qpair_build_prps_sgl_request(&qpair, &req, &tr, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
}

static void
test_nvme_pcie_qpair_build_hw_sgl_request(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request req = {};
	struct nvme_tracker tr = {};
	struct nvme_pcie_ut_bdev_io bio = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	qpair.ctrlr = &ctrlr;
	req.payload = NVME_PAYLOAD_SGL(nvme_pcie_ut_reset_sgl, nvme_pcie_ut_next_sge, &bio, NULL);
	req.cmd.opc = SPDK_NVME_OPC_WRITE;
	tr.prp_sgl_bus_addr = 0xDAADBEE0;
	g_vtophys_size = 4096;

	/* Multiple vectors, 2k + 4k + 2k */
	req.payload_size = 8192;
	bio.iovpos = 3;
	bio.iovs[0].iov_base = (void *)0xDBADBEE0;
	bio.iovs[0].iov_len = 2048;
	bio.iovs[1].iov_base = (void *)0xDCADBEE0;
	bio.iovs[1].iov_len = 4096;
	bio.iovs[2].iov_base = (void *)0xDDADBEE0;
	bio.iovs[2].iov_len = 2048;

	rc = nvme_pcie_qpair_build_hw_sgl_request(&qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[0].unkeyed.length == 2048);
	CU_ASSERT(tr.u.sgl[0].address == 0xDBADBEE0);
	CU_ASSERT(tr.u.sgl[0].unkeyed.subtype == 0);
	CU_ASSERT(tr.u.sgl[1].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[1].unkeyed.length == 4096);
	CU_ASSERT(tr.u.sgl[1].address == 0xDCADBEE0);
	CU_ASSERT(tr.u.sgl[2].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[2].unkeyed.length == 2048);
	CU_ASSERT(tr.u.sgl[2].address == 0xDDADBEE0);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDAADBEE0);
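	/* Three data-block descriptors of sizeof(struct spdk_nvme_sgl_descriptor)
	 * (16 bytes) each follow sgl1, hence 48 bytes.
	 */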
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 48);

	/* Single vector */
	memset(&tr, 0, sizeof(tr));
	memset(&bio, 0, sizeof(bio));
	memset(&req, 0, sizeof(req));
	req.payload = NVME_PAYLOAD_SGL(nvme_pcie_ut_reset_sgl, nvme_pcie_ut_next_sge, &bio, NULL);
	req.cmd.opc = SPDK_NVME_OPC_WRITE;
	req.payload_size = 4096;
	bio.iovpos = 1;
	bio.iovs[0].iov_base = (void *)0xDBADBEE0;
	bio.iovs[0].iov_len = 4096;

	rc = nvme_pcie_qpair_build_hw_sgl_request(&qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[0].unkeyed.length == 4096);
	CU_ASSERT(tr.u.sgl[0].address == 0xDBADBEE0);
	CU_ASSERT(tr.u.sgl[0].unkeyed.subtype == 0);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDBADBEE0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 4096);
}

static void
test_nvme_pcie_qpair_build_contig_request(void)
{
	struct nvme_pcie_qpair pqpair = {};
	struct nvme_request req = {};
	struct nvme_tracker tr = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	pqpair.qpair.ctrlr = &ctrlr;
	ctrlr.page_size = 0x1000;

	/* 1 PRP, 4K-aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload = NVME_PAYLOAD_CONTIG((void *)0x100000, NULL);
	req.payload_size = 0x1000;

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);

	/* 2 PRPs, non-4K-aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload = NVME_PAYLOAD_CONTIG((void *)0x100000, NULL);
	req.payload_size = 0x1000;
	req.payload_offset = 0x800;

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);

	/* 3 PRPs, 4K-aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload = NVME_PAYLOAD_CONTIG((void *)0x100000, NULL);
	req.payload_size = 0x3000;

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);

	/* Address not dword-aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload = NVME_PAYLOAD_CONTIG((void *)0x100001, NULL);
	req.payload_size = 0x3000;
	req.qpair = &pqpair.qpair;
	TAILQ_INIT(&pqpair.outstanding_tr);
	TAILQ_INSERT_TAIL(&pqpair.outstanding_tr, &tr, tq_list);

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == -EFAULT);
}

static void
test_nvme_pcie_ctrlr_regs_get_set(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	volatile struct spdk_nvme_registers regs = {};
	uint32_t value_4;
	uint64_t value_8;
	int rc;

	pctrlr.regs = &regs;

	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, 8, 4);
	CU_ASSERT(rc == 0);

	rc = nvme_pcie_ctrlr_get_reg_4(&pctrlr.ctrlr, 8, &value_4);
	CU_ASSERT(rc == 0);
	CU_ASSERT(value_4 == 4);

	rc = nvme_pcie_ctrlr_set_reg_8(&pctrlr.ctrlr, 0, 0x100000000);
	CU_ASSERT(rc == 0);

	rc = nvme_pcie_ctrlr_get_reg_8(&pctrlr.ctrlr, 0, &value_8);
	CU_ASSERT(rc == 0);
	CU_ASSERT(value_8 == 0x100000000);
}

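/*
 * CMBSZ.SZ is expressed in CMBSZ.SZU-sized units (szu == 0 means 4 KiB), and
 * CMBLOC.OFST uses the same unit; the size and offset asserts below rely on
 * that scaling.
 */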
static void
test_nvme_pcie_ctrlr_map_unmap_cmb(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	volatile struct spdk_nvme_registers regs = {};
	union spdk_nvme_cmbsz_register cmbsz = {};
	union spdk_nvme_cmbloc_register cmbloc = {};
	struct dev_mem_resource cmd_res = {};
	int rc;

	pctrlr.regs = &regs;
	pctrlr.devhandle = (void *)&cmd_res;
	cmd_res.addr = (void *)0x7f7c0080d000;
	cmd_res.len = 0x800000;
	cmd_res.phys_addr = 0xFC800000;
	/* Configure a CMB of 512 units of 4 KiB (szu = 0) at offset 100 units,
	 * with submission queues not supported.
	 */
	cmbsz.bits.sz = 512;
	cmbsz.bits.szu = 0;
	cmbsz.bits.sqs = 0;
	cmbloc.bits.bir = 0;
	cmbloc.bits.ofst = 100;

	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
				  cmbsz.raw);
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw),
				  cmbloc.raw);

	nvme_pcie_ctrlr_map_cmb(&pctrlr);
	CU_ASSERT(pctrlr.cmb.bar_va == (void *)0x7f7c0080d000);
	CU_ASSERT(pctrlr.cmb.bar_pa == 0xFC800000);
	CU_ASSERT(pctrlr.cmb.size == 512 * 4096);
	CU_ASSERT(pctrlr.cmb.current_offset == 4096 * 100);
	CU_ASSERT(pctrlr.ctrlr.opts.use_cmb_sqs == false);

	rc = nvme_pcie_ctrlr_unmap_cmb(&pctrlr);
	CU_ASSERT(rc == 0);

	/* Invalid mapping information */
	memset(&pctrlr.cmb, 0, sizeof(pctrlr.cmb));
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw), 0);
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw), 0);

	nvme_pcie_ctrlr_map_cmb(&pctrlr);
	CU_ASSERT(pctrlr.cmb.bar_va == NULL);
	CU_ASSERT(pctrlr.cmb.bar_pa == 0);
	CU_ASSERT(pctrlr.cmb.size == 0);
	CU_ASSERT(pctrlr.cmb.current_offset == 0);
	CU_ASSERT(pctrlr.ctrlr.opts.use_cmb_sqs == false);
}

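/*
 * Mapping the CMB for I/O registers it with spdk_mem_register(), which works
 * on 2 MiB granularity, so the usable window is the CMB range rounded inward
 * to 2 MiB boundaries.
 */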
static void
prepare_map_io_cmd(struct nvme_pcie_ctrlr *pctrlr)
{
	union spdk_nvme_cmbsz_register cmbsz = {};
	union spdk_nvme_cmbloc_register cmbloc = {};

	cmbsz.bits.sz = 512;
	cmbsz.bits.wds = 1;
	cmbsz.bits.rds = 1;

	nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
				  cmbsz.raw);
	nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw),
				  cmbloc.raw);

	pctrlr->cmb.bar_va = (void *)0x7F7C0080D000;
	pctrlr->cmb.bar_pa = 0xFC800000;
	pctrlr->cmb.current_offset = 1ULL << 22;
	pctrlr->cmb.size = (1ULL << 22) * 512;
	pctrlr->cmb.mem_register_addr = NULL;
	pctrlr->ctrlr.opts.use_cmb_sqs = false;
}

static void
test_nvme_pcie_ctrlr_map_io_cmb(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	volatile struct spdk_nvme_registers regs = {};
	union spdk_nvme_cmbsz_register cmbsz = {};
	void *mem_reg_addr = NULL;
	size_t size;
	int rc;

	pctrlr.regs = &regs;
	prepare_map_io_cmd(&pctrlr);

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	/* The current CMB vaddr is rounded up and the CMB end rounded down
	 * to 2 MiB alignment before registration.
	 */
	CU_ASSERT(mem_reg_addr == (void *)0x7F7C00E00000);
	CU_ASSERT(size == 0x7FE00000);

	rc = nvme_pcie_ctrlr_unmap_io_cmb(&pctrlr.ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pctrlr.cmb.mem_register_addr == NULL);
	CU_ASSERT(pctrlr.cmb.mem_register_size == 0);

	/* cmb mem_register_addr not NULL */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.cmb.mem_register_addr = (void *)0xDEADBEEF;
	pctrlr.cmb.mem_register_size = 1024;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(size == 1024);
	CU_ASSERT(mem_reg_addr == (void *)0xDEADBEEF);

	/* cmb.bar_va is NULL */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.cmb.bar_va = NULL;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);

	/* CMB already in use for submission queues */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.ctrlr.opts.use_cmb_sqs = true;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);

	pctrlr.ctrlr.opts.use_cmb_sqs = false;

	/* Only SQS is supported */
	prepare_map_io_cmd(&pctrlr);
	cmbsz.bits.wds = 0;
	cmbsz.bits.rds = 0;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
				  cmbsz.raw);

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);

	/* CMB size is less than 4 MiB */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.cmb.size = 1ULL << 16;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);
}

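/*
 * With PMRCAP.CMSS set, mapping the PMR programs PMRMSCL/PMRMSCU with the
 * BAR physical address and sets the CMSE bit (bit 1), which is why
 * 0xFC800000 shows up as 0xFC800002 below.
 */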
static void
test_nvme_pcie_ctrlr_map_unmap_pmr(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	volatile struct spdk_nvme_registers regs = {};
	union spdk_nvme_pmrcap_register pmrcap = {};
	struct dev_mem_resource cmd_res = {};
	int rc;

	pctrlr.regs = &regs;
	pctrlr.devhandle = (void *)&cmd_res;
	regs.cap.bits.pmrs = 1;
	cmd_res.addr = (void *)0x7F7C0080d000;
	cmd_res.len = 0x800000;
	cmd_res.phys_addr = 0xFC800000;
	pmrcap.bits.bir = 2;
	pmrcap.bits.cmss = 1;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr,
				  offsetof(struct spdk_nvme_registers, pmrcap.raw),
				  pmrcap.raw);

	nvme_pcie_ctrlr_map_pmr(&pctrlr);
	CU_ASSERT(pctrlr.regs->pmrmscu == 0);
	/* Controller memory space enable, bit 1 */
	CU_ASSERT(pctrlr.regs->pmrmscl.raw == 0xFC800002);
	CU_ASSERT(pctrlr.regs->pmrsts.raw == 0);
	CU_ASSERT(pctrlr.pmr.bar_va == (void *)0x7F7C0080d000);
	CU_ASSERT(pctrlr.pmr.bar_pa == 0xFC800000);
	CU_ASSERT(pctrlr.pmr.size == 0x800000);

	rc = nvme_pcie_ctrlr_unmap_pmr(&pctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pctrlr.regs->pmrmscu == 0);
	CU_ASSERT(pctrlr.regs->pmrmscl.raw == 0);

	/* Invalid pmrcap value */
	memset(&pctrlr, 0, sizeof(pctrlr));
	memset((void *)&regs, 0, sizeof(regs));
	memset(&cmd_res, 0, sizeof(cmd_res));

	pctrlr.regs = &regs;
	pctrlr.devhandle = (void *)&cmd_res;
	regs.cap.bits.pmrs = 1;
	cmd_res.addr = (void *)0x7F7C0080d000;
	cmd_res.len = 0x800000;
	cmd_res.phys_addr = 0xFC800000;
	pmrcap.raw = 0;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr,
				  offsetof(struct spdk_nvme_registers, pmrcap.raw),
				  pmrcap.raw);

	nvme_pcie_ctrlr_map_pmr(&pctrlr);
	CU_ASSERT(pctrlr.pmr.bar_va == NULL);
	CU_ASSERT(pctrlr.pmr.bar_pa == 0);
	CU_ASSERT(pctrlr.pmr.size == 0);
}

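/*
 * nvme_pcie_ctrlr_config_pmr() toggles PMRCTL.EN and polls PMRSTS.NRDY until
 * the PMR reaches the requested state, with a timeout taken from
 * PMRCAP.PMRTO/PMRTU; asking for a state that is already in effect fails
 * with -EINVAL.
 */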
static void
test_nvme_pcie_ctrlr_config_pmr(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	union spdk_nvme_pmrcap_register pmrcap = {};
	union spdk_nvme_pmrsts_register pmrsts = {};
	union spdk_nvme_cap_register	cap = {};
	union spdk_nvme_pmrctl_register pmrctl = {};
	volatile struct spdk_nvme_registers regs = {};
	int rc;

	/* Enable PMR via pmrctl */
	pctrlr.regs = &regs;
	pmrcap.bits.pmrtu = 0;
	pmrcap.bits.pmrto = 1;
	pmrsts.bits.nrdy = false;
	pmrctl.bits.en = 0;
	cap.bits.pmrs = 1;

	rc = nvme_pcie_ctrlr_set_pmrctl(&pctrlr, &pmrctl);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_reg_8(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
				       cap.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, pmrcap.raw),
				       pmrcap.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, pmrsts.raw),
				       pmrsts.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	rc = nvme_pcie_ctrlr_config_pmr(&pctrlr.ctrlr, true);
	CU_ASSERT(rc == 0);
	rc = nvme_pcie_ctrlr_get_pmrctl(&pctrlr, &pmrctl);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pmrctl.bits.en == true);

	/* Disable PMR via pmrctl */
	pmrsts.bits.nrdy = true;
	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, pmrsts.raw),
				       pmrsts.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_pmrctl(&pctrlr, &pmrctl);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	rc = nvme_pcie_ctrlr_config_pmr(&pctrlr.ctrlr, false);
	CU_ASSERT(rc == 0);
	rc = nvme_pcie_ctrlr_get_pmrctl(&pctrlr, &pmrctl);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pmrctl.bits.en == false);

	/* Requested configuration already in effect */
	rc = nvme_pcie_ctrlr_config_pmr(&pctrlr.ctrlr, false);
	CU_ASSERT(rc == -EINVAL);
}

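/*
 * Mapping the PMR for I/O requires read and write data support
 * (PMRCAP.RDS/WDS) and at least 4 MiB of PMR space; as with the CMB, the
 * registered window is rounded inward to 2 MiB alignment, giving the
 * address/size pair checked in the final case.
 */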
static void
map_io_pmr_init(struct nvme_pcie_ctrlr *pctrlr, union spdk_nvme_pmrcap_register *pmrcap)
{
	pmrcap->raw = 0;
	pmrcap->bits.rds = 1;
	pmrcap->bits.wds = 1;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, pmrcap.raw),
				  pmrcap->raw);
	pctrlr->regs->cap.bits.pmrs = 1;
	pctrlr->pmr.mem_register_size = 0;
	pctrlr->pmr.mem_register_addr = NULL;
	pctrlr->pmr.bar_va = (void *)0x7F7C00E30000;
	pctrlr->pmr.size = (1 << 22) * 128;
}

static void
test_nvme_pcie_ctrlr_map_io_pmr(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	struct spdk_nvme_ctrlr *ctrlr;
	volatile struct spdk_nvme_registers regs = {};
	union spdk_nvme_pmrcap_register pmrcap;
	void *mem_reg_addr = NULL;
	size_t rt_size = 0;

	ctrlr = &pctrlr.ctrlr;
	pctrlr.regs = &regs;

	/* PMR is not supported by the controller */
	map_io_pmr_init(&pctrlr, &pmrcap);
	regs.cap.bits.pmrs = 0;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_pmr(ctrlr, &rt_size);
	CU_ASSERT(mem_reg_addr == NULL);

	/* mem_register_addr not NULL */
	map_io_pmr_init(&pctrlr, &pmrcap);
	pctrlr.pmr.mem_register_addr = (void *)0xDEADBEEF;
	pctrlr.pmr.mem_register_size = 1024;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_pmr(ctrlr, &rt_size);
	CU_ASSERT(rt_size == 1024);
	CU_ASSERT(mem_reg_addr == (void *)0xDEADBEEF);

	/* PMR not available */
	map_io_pmr_init(&pctrlr, &pmrcap);
	pctrlr.pmr.bar_va = NULL;
	pctrlr.pmr.mem_register_addr = NULL;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_pmr(ctrlr, &rt_size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(rt_size == 0);

	/* Neither WDS nor RDS is supported */
	map_io_pmr_init(&pctrlr, &pmrcap);
	pmrcap.bits.rds = 0;
	pmrcap.bits.wds = 0;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, pmrcap.raw),
				  pmrcap.raw);

	mem_reg_addr = nvme_pcie_ctrlr_map_io_pmr(ctrlr, &rt_size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(rt_size == 0);

	/* PMR smaller than 4 MiB: abort the mapping */
	map_io_pmr_init(&pctrlr, &pmrcap);
	pctrlr.pmr.size = (1ULL << 20);

	mem_reg_addr = nvme_pcie_ctrlr_map_io_pmr(ctrlr, &rt_size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(rt_size == 0);

	/* All parameters valid */
	map_io_pmr_init(&pctrlr, &pmrcap);

	mem_reg_addr = nvme_pcie_ctrlr_map_io_pmr(ctrlr, &rt_size);
	CU_ASSERT(mem_reg_addr == (void *)0x7F7C01000000);
	CU_ASSERT(rt_size == 0x1FE00000);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvme_pcie", NULL, NULL);
	CU_ADD_TEST(suite, test_prp_list_append);
	CU_ADD_TEST(suite, test_nvme_pcie_hotplug_monitor);
	CU_ADD_TEST(suite, test_shadow_doorbell_update);
	CU_ADD_TEST(suite, test_build_contig_hw_sgl_request);
	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_metadata);
	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_prps_sgl_request);
	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_hw_sgl_request);
	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_contig_request);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_regs_get_set);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_map_unmap_cmb);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_map_io_cmb);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_map_unmap_pmr);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_config_pmr);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_map_io_pmr);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}