/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "common/lib/test_env.c"
#include "nvmf/vfio_user.c"
#include "nvmf/transport.c"

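/*
 * Stub out the nvmf, memory-registration, and bdev symbols that vfio_user.c
 * and transport.c reference but that these unit tests never exercise for real.
 */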
DEFINE_STUB(spdk_nvmf_ctrlr_get_regs, const struct spdk_nvmf_registers *,
	    (struct spdk_nvmf_ctrlr *ctrlr), NULL);
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(spdk_nvmf_request_exec_fabrics, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
		nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
DEFINE_STUB(spdk_nvmf_subsystem_pause, int, (struct spdk_nvmf_subsystem *subsystem,
		uint32_t nsid, spdk_nvmf_subsystem_state_change_done cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_nvmf_subsystem_resume, int, (struct spdk_nvmf_subsystem *subsystem,
		spdk_nvmf_subsystem_state_change_done cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB(nvmf_ctrlr_async_event_error_event, int, (struct spdk_nvmf_ctrlr *ctrlr,
		union spdk_nvme_async_event_completion event), 0);
DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int, (struct spdk_nvmf_qpair *qpair,
		struct spdk_nvme_transport_id *trid), 0);
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB(nvmf_subsystem_get_ctrlr, struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid), NULL);
DEFINE_STUB(nvmf_ctrlr_save_aers, int, (struct spdk_nvmf_ctrlr *ctrlr, uint16_t *aer_cids,
					uint16_t max_aers), 0);
DEFINE_STUB(nvmf_ctrlr_save_migr_data, int, (struct spdk_nvmf_ctrlr *ctrlr,
		struct nvmf_ctrlr_migr_data *data), 0);
DEFINE_STUB(nvmf_ctrlr_restore_migr_data, int, (struct spdk_nvmf_ctrlr *ctrlr,
		struct nvmf_ctrlr_migr_data *data), 0);
DEFINE_STUB_V(nvmf_ctrlr_set_fatal_status, (struct spdk_nvmf_ctrlr *ctrlr));

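/*
 * Identity guest-physical-to-virtual translation callback. The tests build
 * PRP lists and SGLs out of process-local buffers, so the "guest" address is
 * already a valid pointer and can be returned unchanged.
 */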
static void *
gpa_to_vva(void *prv, uint64_t addr, uint64_t len, int prot)
{
	return (void *)(uintptr_t)addr;
}

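/*
 * Exercise nvme_cmd_map_prps(): map PRP1-only, PRP1+PRP2, and PRP-list
 * commands into iovecs, and verify that running out of iovecs returns -ERANGE.
 */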
static void
test_nvme_cmd_map_prps(void)
{
	struct spdk_nvme_cmd cmd = {};
	struct iovec iovs[33];
	uint64_t phy_addr, *prp;
	uint32_t len;
	void *buf, *prps;
	int i, ret;
	size_t mps = 4096;

	buf = spdk_zmalloc(132 * 1024, 4096, &phy_addr, 0, 0);
	CU_ASSERT(buf != NULL);
	prps = spdk_zmalloc(4096, 4096, &phy_addr, 0, 0);
	CU_ASSERT(prps != NULL);

	/* test case 1: 4KiB with PRP1 only */
	cmd.dptr.prp.prp1 = (uint64_t)(uintptr_t)buf;
	len = 4096;
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 1);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)cmd.dptr.prp.prp1);
	CU_ASSERT(iovs[0].iov_len == len);

	/* test case 2: 4KiB with PRP1 and PRP2, 1KiB in first iov, and 3KiB in second iov */
	cmd.dptr.prp.prp1 = (uint64_t)(uintptr_t)buf + 1024 * 3;
	cmd.dptr.prp.prp2 = (uint64_t)(uintptr_t)buf + 4096;
	len = 4096;
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 1, len, mps, gpa_to_vva);
	CU_ASSERT(ret == -ERANGE);
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 2);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)cmd.dptr.prp.prp1);
	CU_ASSERT(iovs[0].iov_len == 1024);
	CU_ASSERT(iovs[1].iov_base == (void *)(uintptr_t)cmd.dptr.prp.prp2);
	CU_ASSERT(iovs[1].iov_len == 1024 * 3);

	/* test case 3: 128KiB with PRP list, 1KiB in first iov, 3KiB in last iov */
	cmd.dptr.prp.prp1 = (uint64_t)(uintptr_t)buf + 1024 * 3;
	cmd.dptr.prp.prp2 = (uint64_t)(uintptr_t)prps;
	len = 128 * 1024;
	prp = prps;
	for (i = 1; i < 33; i++) {
		*prp = (uint64_t)(uintptr_t)buf + i * 4096;
		prp++;
	}
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 33);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)cmd.dptr.prp.prp1);
	CU_ASSERT(iovs[0].iov_len == 1024);
	for (i = 1; i < 32; i++) {
		CU_ASSERT(iovs[i].iov_base == (void *)((uintptr_t)buf + i * 4096));
		CU_ASSERT(iovs[i].iov_len == 4096);
	}
	CU_ASSERT(iovs[32].iov_base == (void *)((uintptr_t)buf + 32 * 4096));
	CU_ASSERT(iovs[32].iov_len == 1024 * 3);

	/* test case 4: 256KiB with PRP list, not enough iovs */
	cmd.dptr.prp.prp1 = (uint64_t)(uintptr_t)buf + 1024 * 3;
	cmd.dptr.prp.prp2 = (uint64_t)(uintptr_t)prps;
	len = 256 * 1024;
	ret = nvme_cmd_map_prps(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == -ERANGE);

	spdk_free(buf);
	spdk_free(prps);
}

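/*
 * Exercise nvme_cmd_map_sgls(): map a single data block, a last-segment
 * descriptor list, and a chained segment + last segment into iovecs, and
 * verify that running out of iovecs returns -ERANGE.
 */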
static void
test_nvme_cmd_map_sgls(void)
{
	struct spdk_nvme_cmd cmd = {};
	struct iovec iovs[33];
	uint64_t phy_addr;
	uint32_t len;
	void *buf, *sgls;
	struct spdk_nvme_sgl_descriptor *sgl;
	int i, ret;
	size_t mps = 4096;

	buf = spdk_zmalloc(132 * 1024, 4096, &phy_addr, 0, 0);
	CU_ASSERT(buf != NULL);
	sgls = spdk_zmalloc(4096, 4096, &phy_addr, 0, 0);
	CU_ASSERT(sgls != NULL);

	/* test case 1: 8KiB with 1 data block */
	len = 8192;
	cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	cmd.dptr.sgl1.unkeyed.length = len;
	cmd.dptr.sgl1.address = (uint64_t)(uintptr_t)buf;

	ret = nvme_cmd_map_sgls(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 1);
	CU_ASSERT(iovs[0].iov_base == buf);
	CU_ASSERT(iovs[0].iov_len == 8192);

	/* test case 2: 8KiB with 2 data blocks and 1 last segment */
	sgl = (struct spdk_nvme_sgl_descriptor *)sgls;
	sgl[0].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[0].unkeyed.length = 2048;
	sgl[0].address = (uint64_t)(uintptr_t)buf;
	sgl[1].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[1].unkeyed.length = len - 2048;
	sgl[1].address = (uint64_t)(uintptr_t)buf + 16 * 1024;

	cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	cmd.dptr.sgl1.unkeyed.length = 2 * sizeof(*sgl);
	cmd.dptr.sgl1.address = (uint64_t)(uintptr_t)sgls;

	ret = nvme_cmd_map_sgls(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 2);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)buf);
	CU_ASSERT(iovs[0].iov_len == 2048);
	CU_ASSERT(iovs[1].iov_base == (void *)((uintptr_t)buf + 16 * 1024));
	CU_ASSERT(iovs[1].iov_len == len - 2048);

	/* test case 3: 8KiB with 1 segment, 1 last segment and 3 data blocks */
	sgl[0].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[0].unkeyed.length = 2048;
	sgl[0].address = (uint64_t)(uintptr_t)buf;
	sgl[1].unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	sgl[1].unkeyed.length = 2 * sizeof(*sgl);
	sgl[1].address = (uint64_t)(uintptr_t)&sgl[9];

	sgl[9].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[9].unkeyed.length = 4096;
	sgl[9].address = (uint64_t)(uintptr_t)buf + 4 * 1024;
	sgl[10].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	sgl[10].unkeyed.length = 2048;
	sgl[10].address = (uint64_t)(uintptr_t)buf + 16 * 1024;

	cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_SEGMENT;
	cmd.dptr.sgl1.unkeyed.length = 2 * sizeof(*sgl);
	cmd.dptr.sgl1.address = (uint64_t)(uintptr_t)&sgl[0];

	ret = nvme_cmd_map_sgls(NULL, &cmd, iovs, 33, len, mps, gpa_to_vva);
	CU_ASSERT(ret == 3);
	CU_ASSERT(iovs[0].iov_base == (void *)(uintptr_t)buf);
	CU_ASSERT(iovs[0].iov_len == 2048);
	CU_ASSERT(iovs[1].iov_base == (void *)((uintptr_t)buf + 4 * 1024));
	CU_ASSERT(iovs[1].iov_len == 4096);
	CU_ASSERT(iovs[2].iov_base == (void *)((uintptr_t)buf + 16 * 1024));
	CU_ASSERT(iovs[2].iov_len == 2048);

	/* test case 4: not enough iovs */
	len = 12 * 1024;
	for (i = 0; i < 6; i++) {
		sgl[i].unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
		sgl[i].unkeyed.length = 2048;
		sgl[i].address = (uint64_t)(uintptr_t)buf + i * 4096;
	}

	cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
	cmd.dptr.sgl1.unkeyed.length = 6 * sizeof(*sgl);
	cmd.dptr.sgl1.address = (uint64_t)(uintptr_t)sgls;

	ret = nvme_cmd_map_sgls(NULL, &cmd, iovs, 4, len, mps, gpa_to_vva);
	CU_ASSERT(ret == -ERANGE);

	spdk_free(buf);
	spdk_free(sgls);
}

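/* Completion callback passed to nvmf_vfio_user_destroy(); flags that teardown finished. */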
static void
ut_transport_destroy_done_cb(void *cb_arg)
{
	int *done = cb_arg;
	*done = 1;
}

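/*
 * Create the vfio-user transport, hang one endpoint off it, and verify that
 * nvmf_vfio_user_destroy() tears it down and invokes the completion callback.
 */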
static void
test_nvmf_vfio_user_create_destroy(void)
{
	struct spdk_nvmf_transport *transport = NULL;
	struct nvmf_vfio_user_transport *vu_transport = NULL;
	struct nvmf_vfio_user_endpoint *endpoint = NULL;
	struct spdk_nvmf_transport_opts opts = {};
	int rc;
	int done;

	/* Leave transport_specific NULL to avoid decoding JSON */
	opts.transport_specific = NULL;

	transport = nvmf_vfio_user_create(&opts);
	CU_ASSERT(transport != NULL);

	vu_transport = SPDK_CONTAINEROF(transport, struct nvmf_vfio_user_transport,
					transport);
	/* Allocate an endpoint for destroy to clean up */
	endpoint = calloc(1, sizeof(*endpoint));
	SPDK_CU_ASSERT_FATAL(endpoint != NULL);
	pthread_mutex_init(&endpoint->lock, NULL);
	TAILQ_INSERT_TAIL(&vu_transport->endpoints, endpoint, link);
	done = 0;

	rc = nvmf_vfio_user_destroy(transport, ut_transport_destroy_done_cb, &done);
	CU_ASSERT(rc == 0);
	CU_ASSERT(done == 1);
}

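/* Register the CUnit suite, run it, and return the failure count as the exit code. */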
int main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("vfio_user", NULL, NULL);

	CU_ADD_TEST(suite, test_nvme_cmd_map_prps);
	CU_ADD_TEST(suite, test_nvme_cmd_map_sgls);
	CU_ADD_TEST(suite, test_nvmf_vfio_user_create_destroy);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}