/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "nvmf/vfio_user.c"
#include "nvmf/transport.c"

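/*
 * Stub out the external nvmf/bdev/env symbols referenced by vfio_user.c and
 * transport.c, so the transport code can be linked and exercised in isolation
 * by this unit test.
 */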
DEFINE_STUB(spdk_nvmf_ctrlr_get_regs, const struct spdk_nvmf_registers *,
	    (struct spdk_nvmf_ctrlr *ctrlr), NULL);
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(spdk_nvmf_request_exec_fabrics, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
		nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
DEFINE_STUB(spdk_nvmf_subsystem_pause, int, (struct spdk_nvmf_subsystem *subsystem,
		uint32_t nsid, spdk_nvmf_subsystem_state_change_done cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_nvmf_subsystem_resume, int, (struct spdk_nvmf_subsystem *subsystem,
		spdk_nvmf_subsystem_state_change_done cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB(nvmf_ctrlr_async_event_error_event, int, (struct spdk_nvmf_ctrlr *ctrlr,
		union spdk_nvme_async_event_completion event), 0);
DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int, (struct spdk_nvmf_qpair *qpair,
		struct spdk_nvme_transport_id *trid), 0);
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB(nvmf_subsystem_get_ctrlr, struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid), NULL);
DEFINE_STUB(nvmf_ctrlr_save_aers, int, (struct spdk_nvmf_ctrlr *ctrlr, uint16_t *aer_cids,
					uint16_t max_aers), 0);
DEFINE_STUB(nvmf_ctrlr_save_migr_data, int, (struct spdk_nvmf_ctrlr *ctrlr,
		struct nvmf_ctrlr_migr_data *data), 0);
DEFINE_STUB(nvmf_ctrlr_restore_migr_data, int, (struct spdk_nvmf_ctrlr *ctrlr,
		struct nvmf_ctrlr_migr_data *data), 0);

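/*
 * Identity translation used as the gpa_to_vva callback: the tests pass host
 * virtual addresses as "guest" addresses, so the mapping is a no-op cast.
 */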
static void *
gpa_to_vva(void *prv, uint64_t addr, uint64_t len, int prot)
{
	return (void *)(uintptr_t)addr;
}

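/*
 * nvme_cmd_map_prps() translates a command's PRP entries into an iovec array.
 * Per the NVMe spec, PRP1 may start at any offset within a memory page (mps);
 * PRP2 is either a second page or, for transfers spanning more than two pages,
 * a pointer to a PRP list of page-aligned entries.
 */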
static void
test_nvme_cmd_map_prps(void)
{
	struct spdk_nvme_cmd cmd = {};
	struct iovec iovs[33];
	uint64_t phy_addr, *prp;
	uint32_t len;
	void *buf, *prps;
	int i, ret;
	size_t mps = 4096;

	buf = spdk_zmalloc(132 * 1024, 4096, &phy_addr, 0, 0);
	CU_ASSERT(buf != NULL);
	prps = spdk_zmalloc(4096, 4096, &phy_addr, 0, 0);
	CU_ASSERT(prps != NULL);

	/* test case 1: 4KiB with PRP1 only */
	cmd.dptr.prp.prp1 = (uint64_t)(uintptr_t)buf;
	len = 4096;
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 1);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)cmd.dptr.prp.prp1);
	CU_ASSERT(iovs[0].iov_len == len);

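	/*
	 * With mps 4KiB, a PRP1 that starts 3KiB into a page leaves only 1KiB
	 * before the page boundary; the remainder comes from PRP2. A single
	 * iovec is therefore not enough and the first call must fail with
	 * -ERANGE.
	 */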
	/* test case 2: 4KiB with PRP1 and PRP2, 1KiB in first iov, and 3KiB in second iov */
	cmd.dptr.prp.prp1 = (uint64_t)(uintptr_t)buf + 1024 * 3;
	cmd.dptr.prp.prp2 = (uint64_t)(uintptr_t)buf + 4096;
	len = 4096;
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 1, len, mps, gpa_to_vva);
	CU_ASSERT(ret == -ERANGE);
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 2);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)cmd.dptr.prp.prp1);
	CU_ASSERT(iovs[0].iov_len == 1024);
	CU_ASSERT(iovs[1].iov_base == (void *)(uintptr_t)cmd.dptr.prp.prp2);
	CU_ASSERT(iovs[1].iov_len == 1024 * 3);

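	/*
	 * PRP2 now points at a PRP list page; its 32 page-aligned entries plus
	 * the unaligned PRP1 head cover the full 128KiB in 33 iovecs.
	 */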
	/* test case 3: 128KiB with PRP list, 1KiB in first iov, 3KiB in last iov */
	cmd.dptr.prp.prp1 = (uint64_t)(uintptr_t)buf + 1024 * 3;
	cmd.dptr.prp.prp2 = (uint64_t)(uintptr_t)prps;
	len = 128 * 1024;
	prp = prps;
	for (i = 1; i < 33; i++) {
		*prp = (uint64_t)(uintptr_t)buf + i * 4096;
		prp++;
	}
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 33);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)cmd.dptr.prp.prp1);
	CU_ASSERT(iovs[0].iov_len == 1024);
	for (i = 1; i < 32; i++) {
		CU_ASSERT(iovs[i].iov_base == (void *)((uintptr_t)buf + i * 4096));
		CU_ASSERT(iovs[i].iov_len == 4096);
	}
	CU_ASSERT(iovs[32].iov_base == (void *)((uintptr_t)buf + 32 * 4096));
	CU_ASSERT(iovs[32].iov_len == 1024 * 3);

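	/*
	 * 256KiB starting 3KiB into a page needs 65 iovec entries (a 1KiB head
	 * plus 64 more), which exceeds the 33 provided.
	 */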
	/* test case 4: 256KiB with PRP list, not enough iovs */
	cmd.dptr.prp.prp1 = (uint64_t)(uintptr_t)buf + 1024 * 3;
	cmd.dptr.prp.prp2 = (uint64_t)(uintptr_t)prps;
	len = 256 * 1024;
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == -ERANGE);

	spdk_free(buf);
	spdk_free(prps);
}

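/*
 * nvme_cmd_map_sgls() walks a command's SGL into an iovec array. The command's
 * sgl1 is either a single data block descriptor or a (last) segment descriptor
 * that points to an array of further descriptors.
 */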
static void
test_nvme_cmd_map_sgls(void)
{
	struct spdk_nvme_cmd cmd = {};
	struct iovec iovs[33];
	uint64_t phy_addr;
	uint32_t len;
	void *buf, *sgls;
	struct spdk_nvme_sgl_descriptor *sgl;
	int i, ret;
	size_t mps = 4096;

	buf = spdk_zmalloc(132 * 1024, 4096, &phy_addr, 0, 0);
	CU_ASSERT(buf != NULL);
	sgls = spdk_zmalloc(4096, 4096, &phy_addr, 0, 0);
	CU_ASSERT(sgls != NULL);

	/* test case 1: 8KiB with 1 data block */
	len = 8192;
	cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	cmd.dptr.sgl1.unkeyed.length = len;
	cmd.dptr.sgl1.address = (uint64_t)(uintptr_t)buf;

	ret = nvme_cmd_map_sgls(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 1);
	CU_ASSERT(iovs[0].iov_base == buf);
	CU_ASSERT(iovs[0].iov_len == 8192);

	/* test case 2: 8KiB with 2 data blocks and 1 last segment */
	sgl = (struct spdk_nvme_sgl_descriptor *)sgls;
	sgl[0].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[0].unkeyed.length = 2048;
	sgl[0].address = (uint64_t)(uintptr_t)buf;
	sgl[1].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[1].unkeyed.length = len - 2048;
	sgl[1].address = (uint64_t)(uintptr_t)buf + 16 * 1024;

	cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	cmd.dptr.sgl1.unkeyed.length = 2 * sizeof(*sgl);
	cmd.dptr.sgl1.address = (uint64_t)(uintptr_t)sgls;

	ret = nvme_cmd_map_sgls(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 2);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)buf);
	CU_ASSERT(iovs[0].iov_len == 2048);
	CU_ASSERT(iovs[1].iov_base == (void *)((uintptr_t)buf + 16 * 1024));
	CU_ASSERT(iovs[1].iov_len == len - 2048);

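	/*
	 * The outer SEGMENT holds one data block plus a LAST_SEGMENT descriptor
	 * that chains to two more data blocks at sgl[9] and sgl[10].
	 */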
	/* test case 3: 8KiB with 1 segment, 1 last segment and 3 data blocks */
	sgl[0].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[0].unkeyed.length = 2048;
	sgl[0].address = (uint64_t)(uintptr_t)buf;
	sgl[1].unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl[1].unkeyed.length = 2 * sizeof(*sgl);
	sgl[1].address = (uint64_t)(uintptr_t)&sgl[9];

	sgl[9].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[9].unkeyed.length = 4096;
	sgl[9].address = (uint64_t)(uintptr_t)buf + 4 * 1024;
	sgl[10].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[10].unkeyed.length = 2048;
	sgl[10].address = (uint64_t)(uintptr_t)buf + 16 * 1024;

	cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_SEGMENT;
	cmd.dptr.sgl1.unkeyed.length = 2 * sizeof(*sgl);
	cmd.dptr.sgl1.address = (uint64_t)(uintptr_t)&sgl[0];

	ret = nvme_cmd_map_sgls(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 3);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)buf);
	CU_ASSERT(iovs[0].iov_len == 2048);
	CU_ASSERT(iovs[1].iov_base == (void *)((uintptr_t)buf + 4 * 1024));
	CU_ASSERT(iovs[1].iov_len == 4096);
	CU_ASSERT(iovs[2].iov_base == (void *)((uintptr_t)buf + 16 * 1024));
	CU_ASSERT(iovs[2].iov_len == 2048);

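	/*
	 * Six 2KiB data blocks but only four iovec slots: mapping must stop
	 * with -ERANGE.
	 */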
	/* test case 4: not enough iovs */
	len = 12 * 1024;
	for (i = 0; i < 6; i++) {
		sgl[i].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
		sgl[i].unkeyed.length = 2048;
		sgl[i].address = (uint64_t)(uintptr_t)buf + i * 4096;
	}

	cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	cmd.dptr.sgl1.unkeyed.length = 6 * sizeof(*sgl);
	cmd.dptr.sgl1.address = (uint64_t)(uintptr_t)sgls;

	ret = nvme_cmd_map_sgls(NULL, &cmd, iovs, 4, len, mps, gpa_to_vva);
	CU_ASSERT(ret == -ERANGE);

	spdk_free(buf);
	spdk_free(sgls);
}

static void
ut_transport_destroy_done_cb(void *cb_arg)
{
	int *done = cb_arg;
	*done = 1;
}

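/*
 * Create the vfio-user transport, queue one endpoint on it, and verify that
 * destroy cleans up the queued endpoint and invokes the done callback before
 * returning.
 */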
static void
test_nvmf_vfio_user_create_destroy(void)
{
	struct spdk_nvmf_transport *transport = NULL;
	struct nvmf_vfio_user_transport *vu_transport = NULL;
	struct nvmf_vfio_user_endpoint *endpoint = NULL;
	struct spdk_nvmf_transport_opts opts = {};
	int rc;
	int done;

	/* Initialize transport_specific to NULL to avoid decoding JSON */
	opts.transport_specific = NULL;

	transport = nvmf_vfio_user_create(&opts);
	CU_ASSERT(transport != NULL);

	vu_transport = SPDK_CONTAINEROF(transport, struct nvmf_vfio_user_transport,
					transport);
	/* Allocate an endpoint for destroy to clean up */
	endpoint = calloc(1, sizeof(*endpoint));
	pthread_mutex_init(&endpoint->lock, NULL);
	TAILQ_INSERT_TAIL(&vu_transport->endpoints, endpoint, link);
	done = 0;

	rc = nvmf_vfio_user_destroy(transport, ut_transport_destroy_done_cb, &done);
	CU_ASSERT(rc == 0);
	CU_ASSERT(done == 1);
}

int main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("vfio_user", NULL, NULL);

	CU_ADD_TEST(suite, test_nvme_cmd_map_prps);
	CU_ADD_TEST(suite, test_nvme_cmd_map_sgls);
	CU_ADD_TEST(suite, test_nvmf_vfio_user_create_destroy);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}
299