/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#define UNIT_TEST_NO_VTOPHYS

#include "nvme/nvme_pcie.c"
#include "nvme/nvme_pcie_common.c"
#include "common/lib/nvme/common_stubs.h"
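/*
 * The sources under test are #included directly above, so every external
 * symbol they reference can be replaced with the stubs and mocks below.
 * UNIT_TEST_NO_VTOPHYS appears to suppress the driver's own vtophys path so
 * that this file can supply its own spdk_vtophys() (see the mock further
 * down).
 */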
pid_t g_spdk_nvme_pid;
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);

DEFINE_STUB(nvme_get_quirks, uint64_t, (const struct spdk_pci_id *id), 0);

DEFINE_STUB(nvme_wait_for_completion, int,
	    (struct spdk_nvme_qpair *qpair,
	     struct nvme_completion_poll_status *status), 0);
DEFINE_STUB_V(nvme_completion_poll_cb, (void *arg, const struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvme_ctrlr_submit_admin_request, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct nvme_request *req), 0);
DEFINE_STUB_V(nvme_ctrlr_free_processes, (struct spdk_nvme_ctrlr *ctrlr));
DEFINE_STUB(nvme_ctrlr_proc_get_devhandle, struct spdk_pci_device *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);
DEFINE_STUB(spdk_pci_device_unmap_bar, int, (struct spdk_pci_device *dev, uint32_t bar, void *addr),
	    0);
DEFINE_STUB(spdk_pci_device_attach, int, (struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb,
		void *enum_ctx, struct spdk_pci_addr *pci_address), 0);
DEFINE_STUB(spdk_pci_device_claim, int, (struct spdk_pci_device *dev), 0);
DEFINE_STUB_V(spdk_pci_device_unclaim, (struct spdk_pci_device *dev));
DEFINE_STUB_V(spdk_pci_device_detach, (struct spdk_pci_device *device));
DEFINE_STUB(spdk_pci_device_cfg_write16, int, (struct spdk_pci_device *dev, uint16_t value,
		uint32_t offset), 0);
DEFINE_STUB(spdk_pci_device_cfg_read16, int, (struct spdk_pci_device *dev, uint16_t *value,
		uint32_t offset), 0);
DEFINE_STUB(spdk_pci_device_get_id, struct spdk_pci_id, (struct spdk_pci_device *dev), {0});
DEFINE_STUB(spdk_pci_event_listen, int, (void), 0);
DEFINE_STUB(spdk_pci_register_error_handler, int, (spdk_pci_error_handler sighandler, void *ctx),
	    0);
DEFINE_STUB_V(spdk_pci_unregister_error_handler, (spdk_pci_error_handler sighandler));
DEFINE_STUB(spdk_pci_enumerate, int,
	    (struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb, void *enum_ctx),
	    -1);

DEFINE_STUB(nvme_transport_get_name, const char *, (const struct spdk_nvme_transport *transport),
	    NULL);

SPDK_LOG_REGISTER_COMPONENT(nvme)

struct dev_mem_resource {
	uint64_t phys_addr;
	uint64_t len;
	void *addr;
};

struct nvme_pcie_ut_bdev_io {
	struct iovec iovs[NVME_MAX_SGL_DESCRIPTORS];
	int iovpos;
};

struct nvme_driver *g_spdk_nvme_driver = NULL;

int
spdk_pci_device_map_bar(struct spdk_pci_device *dev, uint32_t bar,
			void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
{
	struct dev_mem_resource *dev_mem_res = (void *)dev;

	*mapped_addr = dev_mem_res->addr;
	*phys_addr = dev_mem_res->phys_addr;
	*size = dev_mem_res->len;

	return 0;
}

void
nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
{
	CU_ASSERT(ctrlr != NULL);
	if (hot_remove) {
		ctrlr->is_removed = true;
	}

	ctrlr->is_failed = true;
}

static uint64_t g_vtophys_size = 0;

DEFINE_RETURN_MOCK(spdk_vtophys, uint64_t);
uint64_t
spdk_vtophys(const void *buf, uint64_t *size)
{
	if (size) {
		*size = g_vtophys_size;
	}

	HANDLE_RETURN_MOCK(spdk_vtophys);

	return (uintptr_t)buf;
}

DEFINE_STUB(spdk_pci_device_get_addr, struct spdk_pci_addr, (struct spdk_pci_device *dev), {});
DEFINE_STUB(nvme_ctrlr_probe, int, (const struct spdk_nvme_transport_id *trid,
				    struct spdk_nvme_probe_ctx *probe_ctx, void *devhandle), 0);
DEFINE_STUB(spdk_pci_device_is_removed, bool, (struct spdk_pci_device *dev), false);
DEFINE_STUB(nvme_get_ctrlr_by_trid_unsafe, struct spdk_nvme_ctrlr *,
	    (const struct spdk_nvme_transport_id *trid), NULL);
DEFINE_STUB(spdk_nvme_ctrlr_get_regs_csts, union spdk_nvme_csts_register,
	    (struct spdk_nvme_ctrlr *ctrlr), {});
DEFINE_STUB(nvme_ctrlr_get_process, struct spdk_nvme_ctrlr_process *,
	    (struct spdk_nvme_ctrlr *ctrlr, pid_t pid), NULL);
DEFINE_STUB(nvme_completion_is_retry, bool, (const struct spdk_nvme_cpl *cpl), false);
DEFINE_STUB_V(nvme_ctrlr_process_async_event, (struct spdk_nvme_ctrlr *ctrlr,
		const struct spdk_nvme_cpl *cpl));
DEFINE_STUB_V(spdk_nvme_qpair_print_command, (struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_qpair_print_completion, (struct spdk_nvme_qpair *qpair,
		struct spdk_nvme_cpl *cpl));

static void
prp_list_prep(struct nvme_tracker *tr, struct nvme_request *req, uint32_t *prp_index)
{
	memset(req, 0, sizeof(*req));
	memset(tr, 0, sizeof(*tr));
	tr->req = req;
	tr->prp_sgl_bus_addr = 0xDEADBEEF;
	if (prp_index) {
		*prp_index = 0;
	}
}
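/*
 * Quick reminder of the NVMe PRP rules these cases exercise: every PRP entry
 * must be dword-aligned, the first entry may start at any offset within a
 * page, and all later entries must be page-aligned. A transfer that fits in
 * two pages uses PRP1/PRP2 directly; anything longer makes PRP2 point at the
 * PRP list staged in tr->u.prp (at tr->prp_sgl_bus_addr).
 */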
static void
test_prp_list_append(void)
{
	struct nvme_request req;
	struct nvme_tracker tr;
	struct spdk_nvme_ctrlr ctrlr = {};
	uint32_t prp_index;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;

	/* Non-DWORD-aligned buffer (invalid) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100001, 0x1000,
					    0x1000) == -EFAULT);

	/* 512-byte buffer, 4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x200,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);

	/* 512-byte buffer, non-4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x108000, 0x200,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x108000);

	/* 4K buffer, 4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);

	/* 4K buffer, non-4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);

	/* 8K buffer, 4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x2000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);

	/* 8K buffer, non-4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x2000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 3);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);

	/* 12K buffer, 4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x3000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 3);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);

	/* 12K buffer, non-4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x3000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 4);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);
	CU_ASSERT(tr.u.prp[2] == 0x103000);

	/* Two 4K buffers, both 4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 1);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x900000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x900000);

	/* Two 4K buffers, first non-4K-aligned, second 4K-aligned */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x900000, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 3);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x900000);

	/* Two 4K buffers, both non-4K-aligned (invalid) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800, 0x1000,
					    0x1000) == 0);
	CU_ASSERT(prp_index == 2);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x900800, 0x1000,
					    0x1000) == -EFAULT);
	CU_ASSERT(prp_index == 2);

	/* 4K buffer, 4K-aligned, but vtophys fails */
	MOCK_SET(spdk_vtophys, SPDK_VTOPHYS_ERROR);
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000, 0x1000,
					    0x1000) == -EFAULT);
	MOCK_CLEAR(spdk_vtophys);

	/* Largest aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000,
					    (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == 0);
	CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);

	/* Largest non-4K-aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800,
					    NVME_MAX_PRP_LIST_ENTRIES * 0x1000, 0x1000) == 0);
	CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);

	/* Buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100000,
					    (NVME_MAX_PRP_LIST_ENTRIES + 2) * 0x1000, 0x1000) == -EFAULT);

	/* Non-4K-aligned buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
	prp_list_prep(&tr, &req, &prp_index);
	CU_ASSERT(nvme_pcie_prp_list_append(&ctrlr, &tr, &prp_index, (void *)0x100800,
					    (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == -EFAULT);
}
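/*
 * Hotplug fixtures: the tests queue fake PCI uevents on g_events, and the
 * spdk_pci_get_event()/spdk_pci_device_allow() replacements below pop the
 * queue and record whether the monitor allowed the new device.
 */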
struct spdk_event_entry {
	struct spdk_pci_event event;
	STAILQ_ENTRY(spdk_event_entry) link;
};

static STAILQ_HEAD(, spdk_event_entry) g_events = STAILQ_HEAD_INITIALIZER(g_events);
static bool g_device_allowed = false;

int
spdk_pci_get_event(int fd, struct spdk_pci_event *event)
{
	struct spdk_event_entry *entry;

	if (STAILQ_EMPTY(&g_events)) {
		return 0;
	}

	entry = STAILQ_FIRST(&g_events);
	STAILQ_REMOVE_HEAD(&g_events, link);

	*event = entry->event;

	return 1;
}

int
spdk_pci_device_allow(struct spdk_pci_addr *pci_addr)
{
	g_device_allowed = true;

	return 0;
}

static void
test_nvme_pcie_hotplug_monitor(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	struct spdk_event_entry entry = {};
	struct nvme_driver driver;
	pthread_mutexattr_t attr;
	struct spdk_nvme_probe_ctx test_nvme_probe_ctx = {};

	/* Initialize driver state and the ctrlr */
	driver.initialized = true;
	driver.hotplug_fd = 123;
	CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
	CU_ASSERT(pthread_mutex_init(&pctrlr.ctrlr.ctrlr_lock, &attr) == 0);
	CU_ASSERT(pthread_mutex_init(&driver.lock, &attr) == 0);
	TAILQ_INIT(&driver.shared_attached_ctrlrs);
	g_spdk_nvme_driver = &driver;

	/* Case 1: SPDK_UEVENT_ADD (NVME_VFIO / NVME_UIO) */
	entry.event.action = SPDK_UEVENT_ADD;
	spdk_pci_addr_parse(&entry.event.traddr, "0000:05:00.0");
	CU_ASSERT(STAILQ_EMPTY(&g_events));
	STAILQ_INSERT_TAIL(&g_events, &entry, link);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(STAILQ_EMPTY(&g_events));
	CU_ASSERT(g_device_allowed == true);
	g_device_allowed = false;

	/* Case 2: SPDK_UEVENT_REMOVE (NVME_UIO) */
	entry.event.action = SPDK_UEVENT_REMOVE;
	spdk_pci_addr_parse(&entry.event.traddr, "0000:05:00.0");
	CU_ASSERT(STAILQ_EMPTY(&g_events));
	STAILQ_INSERT_TAIL(&g_events, &entry, link);

	MOCK_SET(nvme_get_ctrlr_by_trid_unsafe, &pctrlr.ctrlr);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(STAILQ_EMPTY(&g_events));
	CU_ASSERT(pctrlr.ctrlr.is_failed == true);
	CU_ASSERT(pctrlr.ctrlr.is_removed == true);
	pctrlr.ctrlr.is_failed = false;
	pctrlr.ctrlr.is_removed = false;
	MOCK_CLEAR(nvme_get_ctrlr_by_trid_unsafe);

	/* Case 3: removal on NVME_VFIO, detected without a uevent */
	pctrlr.ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	snprintf(pctrlr.ctrlr.trid.traddr, sizeof(pctrlr.ctrlr.trid.traddr), "0000:02:00.0");
	pctrlr.ctrlr.remove_cb = NULL;
	pctrlr.ctrlr.is_failed = false;
	pctrlr.ctrlr.is_removed = false;
	TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &pctrlr.ctrlr, tailq);

	/* This flag would normally be set by the vfio req notifier callback */
	MOCK_SET(spdk_pci_device_is_removed, true);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(STAILQ_EMPTY(&g_events));
	CU_ASSERT(pctrlr.ctrlr.is_failed == true);
	CU_ASSERT(pctrlr.ctrlr.is_removed == true);
	pctrlr.ctrlr.is_failed = false;
	pctrlr.ctrlr.is_removed = false;
	MOCK_CLEAR(spdk_pci_device_is_removed);

	/* Case 4: Removed device detected in another process */
	MOCK_SET(spdk_pci_device_is_removed, false);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(pctrlr.ctrlr.is_failed == false);

	MOCK_SET(spdk_pci_device_is_removed, true);

	_nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);

	CU_ASSERT(pctrlr.ctrlr.is_failed == true);

	pthread_mutex_destroy(&driver.lock);
	pthread_mutex_destroy(&pctrlr.ctrlr.ctrlr_lock);
	pthread_mutexattr_destroy(&attr);
	g_spdk_nvme_driver = NULL;
}
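/*
 * nvme_pcie_qpair_need_event() follows the shadow-doorbell EVENT_IDX
 * convention (the same idea as virtio's vring_need_event()): a doorbell write
 * moving the index from old to new_idx needs an MMIO event only if event_idx
 * falls within (old, new_idx], presumably computed as
 * (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old).
 * With (10, 15, 14) the event index was already passed, so no event; with
 * (14, 15, 14) the update just crossed it.
 */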
static void
test_shadow_doorbell_update(void)
{
	bool ret;

	/* nvme_pcie_qpair_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old) */
	ret = nvme_pcie_qpair_need_event(10, 15, 14);
	CU_ASSERT(ret == false);

	ret = nvme_pcie_qpair_need_event(14, 15, 14);
	CU_ASSERT(ret == true);
}

static void
test_build_contig_hw_sgl_request(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request req = {};
	struct nvme_tracker tr = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	qpair.ctrlr = &ctrlr;

	/* Test 1: Payload covered by a single mapping */
	req.payload_size = 100;
	req.payload = NVME_PAYLOAD_CONTIG(0, 0);
	g_vtophys_size = 100;
	MOCK_SET(spdk_vtophys, 0xDEADBEEF);

	rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);

	MOCK_CLEAR(spdk_vtophys);
	g_vtophys_size = 0;
	memset(&qpair, 0, sizeof(qpair));
	memset(&req, 0, sizeof(req));
	memset(&tr, 0, sizeof(tr));

	/* Test 2: Payload covered by a single mapping, but request is at an offset */
	qpair.ctrlr = &ctrlr;
	req.payload_size = 100;
	req.payload_offset = 50;
	req.payload = NVME_PAYLOAD_CONTIG(0, 0);
	g_vtophys_size = 1000;
	MOCK_SET(spdk_vtophys, 0xDEADBEEF);

	rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);

	MOCK_CLEAR(spdk_vtophys);
	g_vtophys_size = 0;
	memset(&qpair, 0, sizeof(qpair));
	memset(&req, 0, sizeof(req));
	memset(&tr, 0, sizeof(tr));

	/* Test 3: Payload spans two mappings */
	qpair.ctrlr = &ctrlr;
	req.payload_size = 100;
	req.payload = NVME_PAYLOAD_CONTIG(0, 0);
	g_vtophys_size = 60;
	tr.prp_sgl_bus_addr = 0xFF0FF;
	MOCK_SET(spdk_vtophys, 0xDEADBEEF);

	rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
	CU_ASSERT(req.cmd.dptr.sgl1.address == tr.prp_sgl_bus_addr);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 2 * sizeof(struct spdk_nvme_sgl_descriptor));
	CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[0].unkeyed.length == 60);
	CU_ASSERT(tr.u.sgl[0].address == 0xDEADBEEF);
	CU_ASSERT(tr.u.sgl[1].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[1].unkeyed.length == 40);
	CU_ASSERT(tr.u.sgl[1].address == 0xDEADBEEF);

	MOCK_CLEAR(spdk_vtophys);
	g_vtophys_size = 0;
	memset(&qpair, 0, sizeof(qpair));
	memset(&req, 0, sizeof(req));
	memset(&tr, 0, sizeof(tr));
}
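/*
 * Metadata can be attached to a command in two ways, both covered below: as a
 * metadata SGL (psdt switched to SGL_MPTR_SGL, with mptr pointing at a
 * descriptor staged just below prp_sgl_bus_addr), or on the non-SGL path as a
 * plain physical MPTR obtained from vtophys.
 */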
static void
test_nvme_pcie_qpair_build_metadata(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_tracker tr = {};
	struct nvme_request req = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	tr.req = &req;
	qpair.ctrlr = &ctrlr;

	req.payload.md = (void *)0xDEADBEE0;
	req.md_offset = 0;
	req.md_size = 4096;
	req.cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	tr.prp_sgl_bus_addr = 0xDBADBEEF;
	MOCK_SET(spdk_vtophys, 0xDCADBEE0);

	rc = nvme_pcie_qpair_build_metadata(&qpair, &tr, true, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_SGL);
	CU_ASSERT(tr.meta_sgl.address == 0xDCADBEE0);
	CU_ASSERT(tr.meta_sgl.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.meta_sgl.unkeyed.length == 4096);
	CU_ASSERT(tr.meta_sgl.unkeyed.subtype == 0);
	CU_ASSERT(req.cmd.mptr == (0xDBADBEEF - sizeof(struct spdk_nvme_sgl_descriptor)));
	MOCK_CLEAR(spdk_vtophys);

	/* Build non-SGL metadata */
	MOCK_SET(spdk_vtophys, 0xDDADBEE0);

	rc = nvme_pcie_qpair_build_metadata(&qpair, &tr, false, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.mptr == 0xDDADBEE0);
	MOCK_CLEAR(spdk_vtophys);
}
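/*
 * Fake bdev-style SGL callbacks: next_sge hands out the iovecs stored in a
 * nvme_pcie_ut_bdev_io one at a time, and reset_sgl seeks iovpos to the iovec
 * that begins exactly at the requested payload offset.
 */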
static int
nvme_pcie_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
{
	struct nvme_pcie_ut_bdev_io *bio = cb_arg;
	struct iovec *iov;

	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_MAX_SGL_DESCRIPTORS);

	iov = &bio->iovs[bio->iovpos];

	*address = iov->iov_base;
	*length = iov->iov_len;
	bio->iovpos++;

	return 0;
}

static void
nvme_pcie_ut_reset_sgl(void *cb_arg, uint32_t offset)
{
	struct nvme_pcie_ut_bdev_io *bio = cb_arg;
	struct iovec *iov;

	for (bio->iovpos = 0; bio->iovpos < NVME_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
		iov = &bio->iovs[bio->iovpos];
		/* Offset must be aligned with the start of an SGL entry */
		if (offset == 0) {
			break;
		}

		SPDK_CU_ASSERT_FATAL(offset >= iov->iov_len);
		offset -= iov->iov_len;
	}

	SPDK_CU_ASSERT_FATAL(offset == 0);
	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_MAX_SGL_DESCRIPTORS);
}

static void
test_nvme_pcie_qpair_build_prps_sgl_request(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request req = {};
	struct nvme_tracker tr = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_pcie_ut_bdev_io bio = {};
	int rc;

	tr.req = &req;
	qpair.ctrlr = &ctrlr;
	req.payload.contig_or_cb_arg = &bio;

	req.payload.reset_sgl_fn = nvme_pcie_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_pcie_ut_next_sge;
	req.payload_size = 4096;
	ctrlr.page_size = 4096;
	bio.iovs[0].iov_base = (void *)0x100000;
	bio.iovs[0].iov_len = 4096;

	rc = nvme_pcie_qpair_build_prps_sgl_request(&qpair, &req, &tr, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
}

static void
test_nvme_pcie_qpair_build_hw_sgl_request(void)
{
	struct spdk_nvme_qpair qpair = {};
	struct nvme_request req = {};
	struct nvme_tracker tr = {};
	struct nvme_pcie_ut_bdev_io bio = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	qpair.ctrlr = &ctrlr;
	req.payload.contig_or_cb_arg = &bio;
	req.payload.reset_sgl_fn = nvme_pcie_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_pcie_ut_next_sge;
	req.cmd.opc = SPDK_NVME_OPC_WRITE;
	tr.prp_sgl_bus_addr = 0xDAADBEE0;
	g_vtophys_size = 4096;

	/* Multiple vectors, 2k + 4k + 2k */
	req.payload_size = 8192;
	bio.iovpos = 3;
	bio.iovs[0].iov_base = (void *)0xDBADBEE0;
	bio.iovs[0].iov_len = 2048;
	bio.iovs[1].iov_base = (void *)0xDCADBEE0;
	bio.iovs[1].iov_len = 4096;
	bio.iovs[2].iov_base = (void *)0xDDADBEE0;
	bio.iovs[2].iov_len = 2048;

	rc = nvme_pcie_qpair_build_hw_sgl_request(&qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[0].unkeyed.length == 2048);
	CU_ASSERT(tr.u.sgl[0].address == 0xDBADBEE0);
	CU_ASSERT(tr.u.sgl[0].unkeyed.subtype == 0);
	CU_ASSERT(tr.u.sgl[1].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[1].unkeyed.length == 4096);
	CU_ASSERT(tr.u.sgl[1].address == 0xDCADBEE0);
	CU_ASSERT(tr.u.sgl[2].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[2].unkeyed.length == 2048);
	CU_ASSERT(tr.u.sgl[2].address == 0xDDADBEE0);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDAADBEE0);
	/* Three 16-byte descriptors in the segment */
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 48);

	/* Single vector */
	memset(&tr, 0, sizeof(tr));
	memset(&bio, 0, sizeof(bio));
	memset(&req, 0, sizeof(req));
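	/*
	 * With a single SGE the descriptor should be inlined directly into
	 * dptr.sgl1 as a data block, rather than sgl1 pointing at a
	 * last-segment list.
	 */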
	req.payload.contig_or_cb_arg = &bio;
	req.payload.reset_sgl_fn = nvme_pcie_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_pcie_ut_next_sge;
	req.cmd.opc = SPDK_NVME_OPC_WRITE;
	req.payload_size = 4096;
	bio.iovpos = 1;
	bio.iovs[0].iov_base = (void *)0xDBADBEE0;
	bio.iovs[0].iov_len = 4096;

	rc = nvme_pcie_qpair_build_hw_sgl_request(&qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(tr.u.sgl[0].unkeyed.length == 4096);
	CU_ASSERT(tr.u.sgl[0].address == 0xDBADBEE0);
	CU_ASSERT(tr.u.sgl[0].unkeyed.subtype == 0);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDBADBEE0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 4096);
}

static void
test_nvme_pcie_qpair_build_contig_request(void)
{
	struct nvme_pcie_qpair pqpair = {};
	struct nvme_request req = {};
	struct nvme_tracker tr = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	pqpair.qpair.ctrlr = &ctrlr;
	ctrlr.page_size = 0x1000;

	/* 1 PRP entry, 4K-aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload_size = 0x1000;
	req.payload.contig_or_cb_arg = (void *)0x100000;

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);

	/* 2 PRP entries, non-4K-aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload_size = 0x1000;
	req.payload_offset = 0x800;
	req.payload.contig_or_cb_arg = (void *)0x100000;

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);

	/* 3 PRP entries, 4K-aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload_size = 0x3000;
	req.payload.contig_or_cb_arg = (void *)0x100000;

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
	CU_ASSERT(tr.u.prp[0] == 0x101000);
	CU_ASSERT(tr.u.prp[1] == 0x102000);

	/* Address not dword-aligned (invalid) */
	prp_list_prep(&tr, &req, NULL);
	req.payload_size = 0x3000;
	req.payload.contig_or_cb_arg = (void *)0x100001;
	req.qpair = &pqpair.qpair;
	TAILQ_INIT(&pqpair.outstanding_tr);
	TAILQ_INSERT_TAIL(&pqpair.outstanding_tr, &tr, tq_list);

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == -EFAULT);
}

static void
test_nvme_pcie_ctrlr_regs_get_set(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	volatile struct spdk_nvme_registers regs = {};
	uint32_t value_4;
	uint64_t value_8;
	int rc;

	pctrlr.regs = &regs;

	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, 8, 4);
	CU_ASSERT(rc == 0);

	rc = nvme_pcie_ctrlr_get_reg_4(&pctrlr.ctrlr, 8, &value_4);
	CU_ASSERT(rc == 0);
	CU_ASSERT(value_4 == 4);

	rc = nvme_pcie_ctrlr_set_reg_8(&pctrlr.ctrlr, 0, 0x100000000);
	CU_ASSERT(rc == 0);

	rc = nvme_pcie_ctrlr_get_reg_8(&pctrlr.ctrlr, 0, &value_8);
	CU_ASSERT(rc == 0);
	CU_ASSERT(value_8 == 0x100000000);
}
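/*
 * CMBSZ.SZ reports the CMB size in units of (4 KiB << (4 * SZU)), and
 * CMBLOC.OFST gives the offset into the BAR in those same units, so
 * sz = 512 / szu = 0 / ofst = 100 below should map to 512 * 4 KiB of CMB at
 * byte offset 100 * 4 KiB.
 */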
static void
test_nvme_pcie_ctrlr_map_unmap_cmb(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	volatile struct spdk_nvme_registers regs = {};
	union spdk_nvme_cmbsz_register cmbsz = {};
	union spdk_nvme_cmbloc_register cmbloc = {};
	struct dev_mem_resource cmd_res = {};
	int rc;

	pctrlr.regs = &regs;
	pctrlr.devhandle = (void *)&cmd_res;
	cmd_res.addr = (void *)0x7F7C0080D000;
	cmd_res.len = 0x800000;
	cmd_res.phys_addr = 0xFC800000;

	/* Configure CMB: 512 units of 4 KiB (szu = 0), offset 100 units, SQs unsupported */
	cmbsz.bits.sz = 512;
	cmbsz.bits.szu = 0;
	cmbsz.bits.sqs = 0;
	cmbloc.bits.bir = 0;
	cmbloc.bits.ofst = 100;

	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
				  cmbsz.raw);
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw),
				  cmbloc.raw);

	nvme_pcie_ctrlr_map_cmb(&pctrlr);
	CU_ASSERT(pctrlr.cmb.bar_va == (void *)0x7F7C0080D000);
	CU_ASSERT(pctrlr.cmb.bar_pa == 0xFC800000);
	CU_ASSERT(pctrlr.cmb.size == 512 * 4096);
	CU_ASSERT(pctrlr.cmb.current_offset == 4096 * 100);
	CU_ASSERT(pctrlr.ctrlr.opts.use_cmb_sqs == false);

	rc = nvme_pcie_ctrlr_unmap_cmb(&pctrlr);
	CU_ASSERT(rc == 0);

	/* Invalid mapping information */
	memset(&pctrlr.cmb, 0, sizeof(pctrlr.cmb));
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw), 0);
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw), 0);

	nvme_pcie_ctrlr_map_cmb(&pctrlr);
	CU_ASSERT(pctrlr.cmb.bar_va == NULL);
	CU_ASSERT(pctrlr.cmb.bar_pa == 0);
	CU_ASSERT(pctrlr.cmb.size == 0);
	CU_ASSERT(pctrlr.cmb.current_offset == 0);
	CU_ASSERT(pctrlr.ctrlr.opts.use_cmb_sqs == false);
}

static void
prepare_map_io_cmd(struct nvme_pcie_ctrlr *pctrlr)
{
	union spdk_nvme_cmbsz_register cmbsz = {};
	union spdk_nvme_cmbloc_register cmbloc = {};

	cmbsz.bits.sz = 512;
	cmbsz.bits.wds = 1;
	cmbsz.bits.rds = 1;

	nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
				  cmbsz.raw);
	nvme_pcie_ctrlr_set_reg_4(&pctrlr->ctrlr, offsetof(struct spdk_nvme_registers, cmbloc.raw),
				  cmbloc.raw);

	pctrlr->cmb.bar_va = (void *)0x7F7C0080D000;
	pctrlr->cmb.bar_pa = 0xFC800000;
	pctrlr->cmb.current_offset = 1ULL << 22;
	pctrlr->cmb.size = (1ULL << 22) * 512;
	pctrlr->cmb.mem_register_addr = NULL;
	pctrlr->ctrlr.opts.use_cmb_sqs = false;
}
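/*
 * spdk_mem_register() operates on 2 MiB granularity, so the registrable
 * window is expected to be shrunk inward: the start (bar_va + current_offset)
 * rounded up and the end of the CMB rounded down to 2 MiB boundaries.
 */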
static void
test_nvme_pcie_ctrlr_map_io_cmb(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	volatile struct spdk_nvme_registers regs = {};
	union spdk_nvme_cmbsz_register cmbsz = {};
	void *mem_reg_addr = NULL;
	size_t size;
	int rc;

	pctrlr.regs = &regs;
	prepare_map_io_cmd(&pctrlr);

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	/* Expect the vaddr ceiled and the end of the window floored to 2 MiB alignment */
	CU_ASSERT(mem_reg_addr == (void *)0x7F7C00E00000);
	CU_ASSERT(size == 0x7FE00000);

	rc = nvme_pcie_ctrlr_unmap_io_cmb(&pctrlr.ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pctrlr.cmb.mem_register_addr == NULL);
	CU_ASSERT(pctrlr.cmb.mem_register_size == 0);

	/* CMB already registered (mem_register_addr not NULL) */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.cmb.mem_register_addr = (void *)0xDEADBEEF;
	pctrlr.cmb.mem_register_size = 1024;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(size == 1024);
	CU_ASSERT(mem_reg_addr == (void *)0xDEADBEEF);

	/* cmb.bar_va is NULL */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.cmb.bar_va = NULL;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);

	/* Submission queues already placed in the CMB */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.ctrlr.opts.use_cmb_sqs = true;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);

	pctrlr.ctrlr.opts.use_cmb_sqs = false;

	/* Only SQS is supported (no WDS/RDS) */
	prepare_map_io_cmd(&pctrlr);
	cmbsz.bits.wds = 0;
	cmbsz.bits.rds = 0;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cmbsz.raw),
				  cmbsz.raw);

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);

	/* CMB size is less than 4 MiB */
	prepare_map_io_cmd(&pctrlr);
	pctrlr.cmb.size = 1ULL << 16;

	mem_reg_addr = nvme_pcie_ctrlr_map_io_cmb(&pctrlr.ctrlr, &size);
	CU_ASSERT(mem_reg_addr == NULL);
	CU_ASSERT(size == 0);
}

static void
test_nvme_pcie_ctrlr_map_unmap_pmr(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	volatile struct spdk_nvme_registers regs = {};
	union spdk_nvme_pmrcap_register pmrcap = {};
	struct dev_mem_resource cmd_res = {};
	int rc;

	pctrlr.regs = &regs;
	pctrlr.devhandle = (void *)&cmd_res;
	regs.cap.bits.pmrs = 1;
	cmd_res.addr = (void *)0x7F7C0080D000;
	cmd_res.len = 0x800000;
	cmd_res.phys_addr = 0xFC800000;
	pmrcap.bits.bir = 2;
	pmrcap.bits.cmss = 1;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr,
				  offsetof(struct spdk_nvme_registers, pmrcap.raw),
				  pmrcap.raw);

	nvme_pcie_ctrlr_map_pmr(&pctrlr);
	CU_ASSERT(pctrlr.regs->pmrmscu == 0);
	/* Base address 0xFC800000 with CMSE (controller memory space enable, bit 1) set */
	CU_ASSERT(pctrlr.regs->pmrmscl.raw == 0xFC800002);
	CU_ASSERT(pctrlr.regs->pmrsts.raw == 0);
	CU_ASSERT(pctrlr.pmr.bar_va == (void *)0x7F7C0080D000);
	CU_ASSERT(pctrlr.pmr.bar_pa == 0xFC800000);
	CU_ASSERT(pctrlr.pmr.size == 0x800000);

	rc = nvme_pcie_ctrlr_unmap_pmr(&pctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pctrlr.regs->pmrmscu == 0);
	CU_ASSERT(pctrlr.regs->pmrmscl.raw == 0);

	/* Invalid pmrcap value */
	memset(&pctrlr, 0, sizeof(pctrlr));
	memset((void *)&regs, 0, sizeof(regs));
	memset(&cmd_res, 0, sizeof(cmd_res));

	pctrlr.regs = &regs;
	pctrlr.devhandle = (void *)&cmd_res;
	regs.cap.bits.pmrs = 1;
	cmd_res.addr = (void *)0x7F7C0080D000;
	cmd_res.len = 0x800000;
	cmd_res.phys_addr = 0xFC800000;
	pmrcap.raw = 0;
	nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr,
				  offsetof(struct spdk_nvme_registers, pmrcap.raw),
				  pmrcap.raw);

	nvme_pcie_ctrlr_map_pmr(&pctrlr);
	CU_ASSERT(pctrlr.pmr.bar_va == NULL);
	CU_ASSERT(pctrlr.pmr.bar_pa == 0);
	CU_ASSERT(pctrlr.pmr.size == 0);
}
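/*
 * nvme_pcie_ctrlr_config_pmr() flips PMRCTL.EN and then appears to poll
 * PMRSTS.NRDY (bounded by the PMRCAP.PMRTO timeout) until it reflects the new
 * state: NRDY clear once enabled, NRDY set once disabled. Requesting the
 * state the PMR is already in should fail with -EINVAL.
 */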
static void
test_nvme_pcie_ctrlr_config_pmr(void)
{
	struct nvme_pcie_ctrlr pctrlr = {};
	union spdk_nvme_pmrcap_register pmrcap = {};
	union spdk_nvme_pmrsts_register pmrsts = {};
	union spdk_nvme_cap_register cap = {};
	union spdk_nvme_pmrctl_register pmrctl = {};
	volatile struct spdk_nvme_registers regs = {};
	int rc;

	/* pmrctl enable */
	pctrlr.regs = &regs;
	pmrcap.bits.pmrtu = 0;
	pmrcap.bits.pmrto = 1;
	pmrsts.bits.nrdy = false;
	pmrctl.bits.en = 0;
	cap.bits.pmrs = 1;

	rc = nvme_pcie_ctrlr_set_pmrctl(&pctrlr, &pmrctl);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_reg_8(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, cap.raw),
				       cap.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, pmrcap.raw),
				       pmrcap.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, pmrsts.raw),
				       pmrsts.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	rc = nvme_pcie_ctrlr_config_pmr(&pctrlr.ctrlr, true);
	CU_ASSERT(rc == 0);
	rc = nvme_pcie_ctrlr_get_pmrctl(&pctrlr, &pmrctl);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pmrctl.bits.en == true);

	/* pmrctl disable */
	pmrsts.bits.nrdy = true;
	rc = nvme_pcie_ctrlr_set_reg_4(&pctrlr.ctrlr, offsetof(struct spdk_nvme_registers, pmrsts.raw),
				       pmrsts.raw);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	rc = nvme_pcie_ctrlr_set_pmrctl(&pctrlr, &pmrctl);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	rc = nvme_pcie_ctrlr_config_pmr(&pctrlr.ctrlr, false);
	CU_ASSERT(rc == 0);
	rc = nvme_pcie_ctrlr_get_pmrctl(&pctrlr, &pmrctl);
	CU_ASSERT(rc == 0);
	CU_ASSERT(pmrctl.bits.en == false);

	/* Requested configuration is already in place */
	rc = nvme_pcie_ctrlr_config_pmr(&pctrlr.ctrlr, false);
	CU_ASSERT(rc == -EINVAL);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvme_pcie", NULL, NULL);
	CU_ADD_TEST(suite, test_prp_list_append);
	CU_ADD_TEST(suite, test_nvme_pcie_hotplug_monitor);
	CU_ADD_TEST(suite, test_shadow_doorbell_update);
	CU_ADD_TEST(suite, test_build_contig_hw_sgl_request);
	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_metadata);
	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_prps_sgl_request);
	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_hw_sgl_request);
	CU_ADD_TEST(suite, test_nvme_pcie_qpair_build_contig_request);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_regs_get_set);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_map_unmap_cmb);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_map_io_cmb);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_map_unmap_pmr);
	CU_ADD_TEST(suite, test_nvme_pcie_ctrlr_config_pmr);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}