/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "spdk_internal/mock.h"

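/*
 * Pull in the implementation under test directly so that the helpers it
 * defines (e.g. nvmf_bdev_ctrlr_get_rw_params(), nvmf_bdev_ctrlr_lba_in_range())
 * can be called from the tests below. The DEFINE_STUB()s that follow provide
 * minimal replacements for its external bdev and nvmf dependencies.
 */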
#include "nvmf/ctrlr_bdev.c"


SPDK_LOG_REGISTER_COMPONENT(nvmf)

DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), -1);

DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");

DEFINE_STUB(spdk_bdev_get_acwu, uint16_t, (const struct spdk_bdev *bdev), 0);

DEFINE_STUB(spdk_bdev_get_data_block_size, uint32_t,
	    (const struct spdk_bdev *bdev), 512);

DEFINE_STUB(spdk_bdev_get_physical_block_size, uint32_t,
	    (const struct spdk_bdev *bdev), 4096);

DEFINE_STUB(nvmf_ctrlr_process_admin_cmd, int, (struct spdk_nvmf_request *req), 0);

DEFINE_STUB(spdk_bdev_comparev_blocks, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch, struct iovec *iov, int iovcnt,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_nvme_admin_passthru, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

DEFINE_STUB(spdk_bdev_abort, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     void *bio_cb_arg, spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

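/*
 * Minimal stand-in for the real bdev structure: only the fields that these
 * tests read (blocklen, num_blocks, md_len) are defined.
 */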
struct spdk_bdev {
	uint32_t blocklen;
	uint64_t num_blocks;
	uint32_t md_len;
};

uint32_t
spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
{
	return bdev->blocklen;
}

uint64_t
spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
{
	return bdev->num_blocks;
}

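/* Not expected to be reached by these tests; abort() makes any stray call fail loudly. */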
uint32_t
spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
{
	abort();
	return 0;
}

uint32_t
spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
{
	return bdev->md_len;
}

DEFINE_STUB(spdk_bdev_comparev_and_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *compare_iov, int compare_iovcnt,
	     struct iovec *write_iov, int write_iovcnt,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(nvmf_ctrlr_process_io_cmd, int, (struct spdk_nvmf_request *req), 0);

DEFINE_STUB_V(spdk_bdev_io_get_nvme_fused_status, (const struct spdk_bdev_io *bdev_io,
		uint32_t *cdw0, int *cmp_sct, int *cmp_sc, int *wr_sct, int *wr_sc));

DEFINE_STUB(spdk_bdev_is_md_interleaved, bool, (const struct spdk_bdev *bdev), false);

DEFINE_STUB(spdk_bdev_get_dif_type, enum spdk_dif_type,
	    (const struct spdk_bdev *bdev), SPDK_DIF_DISABLE);

DEFINE_STUB(spdk_bdev_is_dif_head_of_md, bool, (const struct spdk_bdev *bdev), false);

DEFINE_STUB(spdk_bdev_is_dif_check_enabled, bool,
	    (const struct spdk_bdev *bdev, enum spdk_dif_check_type check_type), false);

DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *,
	    (struct spdk_bdev_desc *desc), NULL);

DEFINE_STUB(spdk_bdev_flush_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_unmap_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_io_type_supported, bool,
	    (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false);

DEFINE_STUB(spdk_bdev_queue_io_wait, int,
	    (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
	     struct spdk_bdev_io_wait_entry *entry),
	    0);

DEFINE_STUB(spdk_bdev_write_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_writev_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_read_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_readv_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_write_zeroes_blocks, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     uint64_t offset_blocks, uint64_t num_blocks,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB(spdk_bdev_nvme_io_passthru, int,
	    (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
	     spdk_bdev_io_completion_cb cb, void *cb_arg),
	    0);

DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
{
	abort();
	return NULL;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
{
	abort();
	return NULL;
}

struct spdk_nvmf_ns *
spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns)
{
	abort();
	return NULL;
}

DEFINE_STUB_V(spdk_bdev_io_get_nvme_status,
	      (const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct, int *sc));

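/*
 * Stubbed DIF context initialization: capture just the fields that
 * test_get_dif_ctx() asserts on.
 */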
int
spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
		  bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
		  uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
		  uint32_t data_offset, uint16_t guard_seed)
{
	ctx->block_size = block_size;
	ctx->md_size = md_size;
	ctx->init_ref_tag = init_ref_tag;

	return 0;
}

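/*
 * The starting LBA should come from CDW10/CDW11 and the 0's-based NLB from
 * CDW12 bits 15:00; flag bits in the upper part of CDW12 (e.g. FUA) must not
 * leak into the block count.
 */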
static void
test_get_rw_params(void)
{
	struct spdk_nvme_cmd cmd = {0};
	uint64_t lba;
	uint64_t count;

	lba = 0;
	count = 0;
	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
	to_le32(&cmd.cdw12, 0x9875 | SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
	nvmf_bdev_ctrlr_get_rw_params(&cmd, &lba, &count);
	CU_ASSERT(lba == 0x1234567890ABCDEF);
	CU_ASSERT(count == 0x9875 + 1); /* NOTE: this field is 0's based, hence the +1 */
}

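/*
 * Bounds check of a (start LBA, block count) range against the bdev size,
 * including wrap-around cases near UINT64_MAX.
 */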
static void
test_lba_in_range(void)
{
	/* Trivial cases (no overflow) */
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1000) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1001) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 999) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 1000) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 999, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1000, 1) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1001, 1) == false);

	/* Overflow edge cases */
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 0, UINT64_MAX) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 1, UINT64_MAX) == false);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX - 1, 1) == true);
	CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX, 1) == false);
}

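/*
 * DIF context extraction: a bdev without metadata yields no DIF context;
 * with metadata present, the block size, metadata size, and initial
 * reference tag (low 32 bits of the starting LBA) should be filled in.
 */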
static void
test_get_dif_ctx(void)
{
	struct spdk_bdev bdev = {};
	struct spdk_nvme_cmd cmd = {};
	struct spdk_dif_ctx dif_ctx = {};
	bool ret;

	bdev.md_len = 0;

	ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
	CU_ASSERT(ret == false);

	to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
	bdev.blocklen = 520;
	bdev.md_len = 8;

	ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
	CU_ASSERT(ret == true);
	CU_ASSERT(dif_ctx.block_size == 520);
	CU_ASSERT(dif_ctx.md_size == 8);
	CU_ASSERT(dif_ctx.init_ref_tag == 0x90ABCDEF);
}

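/*
 * Fused COMPARE + WRITE handling: the valid case is submitted asynchronously,
 * while mismatched compare/write ranges, an out-of-range LBA, and a write
 * buffer shorter than the requested blocks complete immediately with an
 * error status on the write half of the fused pair.
 */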
static void
test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd(void)
{
	int rc;
	struct spdk_bdev bdev = {};
	struct spdk_bdev_desc *desc = NULL;
	struct spdk_io_channel ch = {};

	struct spdk_nvmf_request cmp_req = {};
	union nvmf_c2h_msg cmp_rsp = {};

	struct spdk_nvmf_request write_req = {};
	union nvmf_c2h_msg write_rsp = {};

	struct spdk_nvmf_qpair qpair = {};

	struct spdk_nvme_cmd cmp_cmd = {};
	struct spdk_nvme_cmd write_cmd = {};

	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_subsystem subsystem = {};
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ns *subsys_ns[1] = {};

	struct spdk_nvmf_poll_group group = {};
	struct spdk_nvmf_subsystem_poll_group sgroups = {};
	struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};

	bdev.blocklen = 512;
	bdev.num_blocks = 10;
	ns.bdev = &bdev;

	subsystem.id = 0;
	subsystem.max_nsid = 1;
	subsys_ns[0] = &ns;
	subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;

	/* Enable controller */
	ctrlr.vcprop.cc.bits.en = 1;
	ctrlr.subsys = &subsystem;

	group.num_sgroups = 1;
	sgroups.num_ns = 1;
	sgroups.ns_info = &ns_info;
	group.sgroups = &sgroups;

	qpair.ctrlr = &ctrlr;
	qpair.group = &group;

	cmp_req.qpair = &qpair;
	cmp_req.cmd = (union nvmf_h2c_msg *)&cmp_cmd;
	cmp_req.rsp = &cmp_rsp;

	cmp_cmd.nsid = 1;
	cmp_cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
	cmp_cmd.opc = SPDK_NVME_OPC_COMPARE;

	write_req.qpair = &qpair;
	write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
	write_req.rsp = &write_rsp;

	write_cmd.nsid = 1;
	write_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
	write_cmd.opc = SPDK_NVME_OPC_WRITE;

	/* 1. SUCCESS */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == 0);

	/* 2. Fused command start lba / num blocks mismatch */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 2;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);

	/* 3. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 100;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);

	/* 4. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
	cmp_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	cmp_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */

	write_cmd.cdw10 = 1;	/* SLBA: CDW10 and CDW11 */
	write_cmd.cdw12 = 1;	/* NLB: CDW12 bits 15:00, 0's based */
	write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;

	rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);

	CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
	CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
	CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}

int main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_get_rw_params);
	CU_ADD_TEST(suite, test_lba_in_range);
	CU_ADD_TEST(suite, test_get_dif_ctx);

	CU_ADD_TEST(suite, test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}