/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2021 Intel Corporation. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "common/lib/test_env.c"
#include "nvmf/vfio_user.c"
#include "nvmf/transport.c"

DEFINE_STUB(spdk_nvmf_ctrlr_get_regs, const struct spdk_nvmf_registers *,
	    (struct spdk_nvmf_ctrlr *ctrlr), NULL);
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair), 0);
DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
DEFINE_STUB(spdk_nvmf_subsystem_pause, int, (struct spdk_nvmf_subsystem *subsystem,
		uint32_t nsid, spdk_nvmf_subsystem_state_change_done cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_nvmf_subsystem_resume, int, (struct spdk_nvmf_subsystem *subsystem,
		spdk_nvmf_subsystem_state_change_done cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(spdk_nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB(spdk_nvmf_ctrlr_async_event_error_event, int, (struct spdk_nvmf_ctrlr *ctrlr,
		enum spdk_nvme_async_event_info_error info), 0);
DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int, (struct spdk_nvmf_qpair *qpair,
		struct spdk_nvme_transport_id *trid), 0);
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB(nvmf_subsystem_get_ctrlr, struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid), NULL);
DEFINE_STUB_V(nvmf_ctrlr_set_fatal_status, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB(spdk_nvmf_ctrlr_save_migr_data, int, (struct spdk_nvmf_ctrlr *ctrlr,
		struct spdk_nvmf_ctrlr_migr_data *data), 0);
DEFINE_STUB(spdk_nvmf_ctrlr_restore_migr_data, int, (struct spdk_nvmf_ctrlr *ctrlr,
		const struct spdk_nvmf_ctrlr_migr_data *data), 0);
DEFINE_STUB(spdk_mempool_lookup, struct spdk_mempool *, (const char *name), NULL);

/* Identity translation: treat the guest physical address as the local virtual address. */
static void *
gpa_to_vva(void *prv, uint64_t addr, uint64_t len, uint32_t flags)
{
	return (void *)(uintptr_t)addr;
}

static void
test_nvme_cmd_map_prps(void)
{
	struct spdk_nvme_cmd cmd = {};
	struct iovec iovs[33];
	uint64_t phy_addr, *prp;
	uint32_t len;
	void *buf, *prps;
	int i, ret;
	size_t mps = 4096;

	buf = spdk_zmalloc(132 * 1024, 4096, &phy_addr, 0, 0);
	CU_ASSERT(buf != NULL);
	prps = spdk_zmalloc(4096, 4096, &phy_addr, 0, 0);
	CU_ASSERT(prps != NULL);

	/* test case 1: 4KiB with PRP1 only */
	cmd.dptr.prp.prp1 = (uint64_t)(uintptr_t)buf;
	len = 4096;
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 1);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)cmd.dptr.prp.prp1);
	CU_ASSERT(iovs[0].iov_len == len);

	/* test case 2: 4KiB with PRP1 and PRP2, 1KiB in first iov, and 3KiB in second iov */
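	/*
	 * PRP1 starts 3KiB into a page, so only the 1KiB up to the page
	 * boundary can be mapped from PRP1; the remaining 3KiB must come
	 * from PRP2. With room for only one iovec the mapping cannot fit
	 * and should fail with -ERANGE before succeeding with two iovecs.
	 */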
	cmd.dptr.prp.prp1 = (uint64_t)(uintptr_t)buf + 1024 * 3;
	cmd.dptr.prp.prp2 = (uint64_t)(uintptr_t)buf + 4096;
	len = 4096;
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 1, len, mps, gpa_to_vva);
	CU_ASSERT(ret == -ERANGE);
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 2);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)cmd.dptr.prp.prp1);
	CU_ASSERT(iovs[0].iov_len == 1024);
	CU_ASSERT(iovs[1].iov_base == (void *)(uintptr_t)cmd.dptr.prp.prp2);
	CU_ASSERT(iovs[1].iov_len == 1024 * 3);

	/* test case 3: 128KiB with PRP list, 1KiB in first iov, 3KiB in last iov */
	cmd.dptr.prp.prp1 = (uint64_t)(uintptr_t)buf + 1024 * 3;
	cmd.dptr.prp.prp2 = (uint64_t)(uintptr_t)prps;
	len = 128 * 1024;
	prp = prps;
	for (i = 1; i < 33; i++) {
		*prp = (uint64_t)(uintptr_t)buf + i * 4096;
		prp++;
	}
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 33);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)cmd.dptr.prp.prp1);
	CU_ASSERT(iovs[0].iov_len == 1024);
	for (i = 1; i < 32; i++) {
		CU_ASSERT(iovs[i].iov_base == (void *)((uintptr_t)buf + i * 4096));
		CU_ASSERT(iovs[i].iov_len == 4096);
	}
	CU_ASSERT(iovs[32].iov_base == (void *)((uintptr_t)buf + 32 * 4096));
	CU_ASSERT(iovs[32].iov_len == 1024 * 3);

	/* test case 4: 256KiB with PRP list, not enough iovs */
	cmd.dptr.prp.prp1 = (uint64_t)(uintptr_t)buf + 1024 * 3;
	cmd.dptr.prp.prp2 = (uint64_t)(uintptr_t)prps;
	len = 256 * 1024;
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == -ERANGE);

	spdk_free(buf);
	spdk_free(prps);
}

static void
test_nvme_cmd_map_sgls(void)
{
	struct spdk_nvme_cmd cmd = {};
	struct iovec iovs[33];
	uint64_t phy_addr;
	uint32_t len;
	void *buf, *sgls;
	struct spdk_nvme_sgl_descriptor *sgl;
	int i, ret;
	size_t mps = 4096;

	buf = spdk_zmalloc(132 * 1024, 4096, &phy_addr, 0, 0);
	CU_ASSERT(buf != NULL);
	sgls = spdk_zmalloc(4096, 4096, &phy_addr, 0, 0);
	CU_ASSERT(sgls != NULL);

	/* test case 1: 8KiB with 1 data block */
	len = 8192;
	cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	cmd.dptr.sgl1.unkeyed.length = len;
	cmd.dptr.sgl1.address = (uint64_t)(uintptr_t)buf;

	ret = nvme_cmd_map_sgls(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 1);
	CU_ASSERT(iovs[0].iov_base == buf);
	CU_ASSERT(iovs[0].iov_len == 8192);

	/* test case 2: 8KiB with 2 data blocks and 1 last segment */
	sgl = (struct spdk_nvme_sgl_descriptor *)sgls;
	sgl[0].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[0].unkeyed.length = 2048;
	sgl[0].address = (uint64_t)(uintptr_t)buf;
	sgl[1].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[1].unkeyed.length = len - 2048;
	sgl[1].address = (uint64_t)(uintptr_t)buf + 16 * 1024;

	cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	cmd.dptr.sgl1.unkeyed.length = 2 * sizeof(*sgl);
	cmd.dptr.sgl1.address = (uint64_t)(uintptr_t)sgls;

	ret = nvme_cmd_map_sgls(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 2);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)buf);
	CU_ASSERT(iovs[0].iov_len == 2048);
	CU_ASSERT(iovs[1].iov_base == (void *)((uintptr_t)buf + 16 * 1024));
	CU_ASSERT(iovs[1].iov_len == len - 2048);

	/* test case 3: 8KiB with 1 segment, 1 last segment and 3 data blocks */
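	/*
	 * sgl1 is a (non-last) SEGMENT descriptor referring to sgl[0..1]:
	 * sgl[0] is a 2KiB data block and sgl[1] is a LAST_SEGMENT that in
	 * turn refers to the data blocks in sgl[9..10]. The mapper has to
	 * walk both segment levels and gather the three data blocks
	 * (2KiB + 4KiB + 2KiB = 8KiB) into three iovecs.
	 */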
	sgl[0].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[0].unkeyed.length = 2048;
	sgl[0].address = (uint64_t)(uintptr_t)buf;
	sgl[1].unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl[1].unkeyed.length = 2 * sizeof(*sgl);
	sgl[1].address = (uint64_t)(uintptr_t)&sgl[9];

	sgl[9].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[9].unkeyed.length = 4096;
	sgl[9].address = (uint64_t)(uintptr_t)buf + 4 * 1024;
	sgl[10].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[10].unkeyed.length = 2048;
	sgl[10].address = (uint64_t)(uintptr_t)buf + 16 * 1024;

	cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_SEGMENT;
	cmd.dptr.sgl1.unkeyed.length = 2 * sizeof(*sgl);
	cmd.dptr.sgl1.address = (uint64_t)(uintptr_t)&sgl[0];

	ret = nvme_cmd_map_sgls(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 3);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)buf);
	CU_ASSERT(iovs[0].iov_len == 2048);
	CU_ASSERT(iovs[1].iov_base == (void *)((uintptr_t)buf + 4 * 1024));
	CU_ASSERT(iovs[1].iov_len == 4096);
	CU_ASSERT(iovs[2].iov_base == (void *)((uintptr_t)buf + 16 * 1024));
	CU_ASSERT(iovs[2].iov_len == 2048);

	/* test case 4: 12KiB in 6 data blocks, not enough iovs */
	len = 12 * 1024;
	for (i = 0; i < 6; i++) {
		sgl[i].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
		sgl[i].unkeyed.length = 2048;
		sgl[i].address = (uint64_t)(uintptr_t)buf + i * 4096;
	}

	cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	cmd.dptr.sgl1.unkeyed.length = 6 * sizeof(*sgl);
	cmd.dptr.sgl1.address = (uint64_t)(uintptr_t)sgls;

	ret = nvme_cmd_map_sgls(NULL, &cmd, iovs, 4, len, mps, gpa_to_vva);
	CU_ASSERT(ret == -ERANGE);

	spdk_free(buf);
	spdk_free(sgls);
}

static void
ut_transport_destroy_done_cb(void *cb_arg)
{
	int *done = cb_arg;

	*done = 1;
}

static void
test_nvmf_vfio_user_create_destroy(void)
{
	struct spdk_nvmf_transport *transport = NULL;
	struct nvmf_vfio_user_transport *vu_transport = NULL;
	struct nvmf_vfio_user_endpoint *endpoint = NULL;
	struct spdk_nvmf_transport_opts opts = {};
	int rc;
	int done;

	/* Set transport_specific to NULL to avoid decoding JSON */
	opts.transport_specific = NULL;

	transport = nvmf_vfio_user_create(&opts);
	CU_ASSERT(transport != NULL);

	vu_transport = SPDK_CONTAINEROF(transport, struct nvmf_vfio_user_transport,
					transport);

	/* Allocate an endpoint for the destroy path to tear down */
	endpoint = calloc(1, sizeof(*endpoint));
	SPDK_CU_ASSERT_FATAL(endpoint != NULL);
	pthread_mutex_init(&endpoint->lock, NULL);
	TAILQ_INSERT_TAIL(&vu_transport->endpoints, endpoint, link);
	done = 0;

	rc = nvmf_vfio_user_destroy(transport, ut_transport_destroy_done_cb, &done);
	CU_ASSERT(rc == 0);
	CU_ASSERT(done == 1);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("vfio_user", NULL, NULL);

	CU_ADD_TEST(suite, test_nvme_cmd_map_prps);
	CU_ADD_TEST(suite, test_nvme_cmd_map_sgls);
	CU_ADD_TEST(suite, test_nvmf_vfio_user_create_destroy);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}