/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2019 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk_internal/cunit.h"
/* We have our own mock for this */
#define UNIT_TEST_NO_VTOPHYS
#include "common/lib/test_env.c"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"
#include "unit/lib/json_mock.c"
#include "spdk/reduce.h"

/* A single mbuf is enough when the data exactly matches the chunk size;
 * an offset into the data and/or a remainder after the data can each add
 * one more, for a maximum of 3.
 */
#define UT_MBUFS_PER_OP 3
/* For testing the crossing of a huge page boundary on address translation,
 * we'll have an extra one but we only test on the source side.
 */
#define UT_MBUFS_PER_OP_BOUND_TEST 4

struct spdk_bdev_io *g_bdev_io;
struct spdk_io_channel *g_io_ch;
struct vbdev_compress g_comp_bdev;
struct comp_bdev_io *g_io_ctx;
struct comp_io_channel *g_comp_ch;

struct spdk_reduce_vol_params g_vol_params;

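/* The spdk_reduce_vol_* I/O mocks below complete immediately with whatever
 * status is stored in ut_spdk_reduce_vol_op_complete_err, so each test can
 * toggle between the success and failure paths.
 */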
static int ut_spdk_reduce_vol_op_complete_err = 0;
void
spdk_reduce_vol_writev(struct spdk_reduce_vol *vol, struct iovec *iov, int iovcnt,
		       uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn,
		       void *cb_arg)
{
	cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err);
}

void
spdk_reduce_vol_readv(struct spdk_reduce_vol *vol, struct iovec *iov, int iovcnt,
		      uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn,
		      void *cb_arg)
{
	cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err);
}

void
spdk_reduce_vol_unmap(struct spdk_reduce_vol *vol,
		      uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn,
		      void *cb_arg)
{
	cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err);
	spdk_thread_poll(spdk_get_thread(), 0, 0);
}

#include "bdev/compress/vbdev_compress.c"

/* SPDK stubs */
DEFINE_STUB(spdk_accel_get_opc_module_name, int, (enum spdk_accel_opcode opcode,
		const char **module_name), 0);
DEFINE_STUB(spdk_accel_get_io_channel, struct spdk_io_channel *, (void), (void *)0xfeedbeef);
DEFINE_STUB(spdk_bdev_get_aliases, const struct spdk_bdev_aliases_list *,
	    (const struct spdk_bdev *bdev), NULL);
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
				     void *cb_arg));
DEFINE_STUB(spdk_bdev_open_ext, int, (const char *bdev_name, bool write,
				      spdk_bdev_event_cb_t event_cb,
				      void *event_ctx, struct spdk_bdev_desc **_desc), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_by_name, struct spdk_bdev *, (const char *bdev_name), NULL);
DEFINE_STUB(spdk_bdev_io_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_io *bdev_io),
	    0);
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB_V(spdk_reduce_vol_unload, (struct spdk_reduce_vol *vol,
				       spdk_reduce_vol_op_complete cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_reduce_vol_load, (struct spdk_reduce_backing_dev *backing_dev,
				     spdk_reduce_vol_op_with_handle_complete cb_fn, void *cb_arg));
DEFINE_STUB(spdk_reduce_vol_get_params, const struct spdk_reduce_vol_params *,
	    (struct spdk_reduce_vol *vol), &g_vol_params);
DEFINE_STUB(spdk_reduce_vol_get_pm_path, const char *,
	    (const struct spdk_reduce_vol *vol), NULL);
DEFINE_STUB_V(spdk_reduce_vol_init, (struct spdk_reduce_vol_params *params,
				     struct spdk_reduce_backing_dev *backing_dev,
				     const char *pm_file_dir,
				     spdk_reduce_vol_op_with_handle_complete cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_reduce_vol_destroy, (struct spdk_reduce_backing_dev *backing_dev,
					spdk_reduce_vol_op_complete cb_fn, void *cb_arg));
DEFINE_STUB(spdk_reduce_vol_get_info, const struct spdk_reduce_vol_info *,
	    (const struct spdk_reduce_vol *vol), 0);

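/* spdk_vtophys() mock: normally the full buffer is reported as mapped, but on
 * the g_small_size_modify-th call only g_small_size bytes are reported, which
 * lets a test force a translation that stops short (e.g. at a huge page
 * boundary).
 */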
int g_small_size_counter = 0;
int g_small_size_modify = 0;
uint64_t g_small_size = 0;
uint64_t
spdk_vtophys(const void *buf, uint64_t *size)
{
	g_small_size_counter++;
	if (g_small_size_counter == g_small_size_modify) {
		*size = g_small_size;
		g_small_size_counter = 0;
		g_small_size_modify = 0;
	}
	return (uint64_t)buf;
}

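/* Hand the buffer straight back so spdk_bdev_io_get_buf()-driven paths
 * complete inline.
 */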
void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	cb(g_io_ch, g_bdev_io, true);
}

/* Mock these functions to call the callback and then return the value we require */
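/* Setting ut_spdk_bdev_*_blocks (or ut_spdk_bdev_reset) to a non-zero value
 * makes the corresponding mock both report failure to the completion callback
 * and return that value to the caller, so tests can drive both paths.
 */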
int ut_spdk_bdev_readv_blocks = 0;
int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
	return ut_spdk_bdev_readv_blocks;
}

int ut_spdk_bdev_writev_blocks = 0;
bool ut_spdk_bdev_writev_blocks_mocked = false;
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
	return ut_spdk_bdev_writev_blocks;
}

int ut_spdk_bdev_unmap_blocks = 0;
bool ut_spdk_bdev_unmap_blocks_mocked = false;
int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
	return ut_spdk_bdev_unmap_blocks;
}

int ut_spdk_bdev_flush_blocks = 0;
bool ut_spdk_bdev_flush_blocks_mocked = false;
int
spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		       void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
	return ut_spdk_bdev_flush_blocks;
}

int ut_spdk_bdev_reset = 0;
bool ut_spdk_bdev_reset_mocked = false;
int
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
	return ut_spdk_bdev_reset;
}

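/* Record the completion status and the fact that completion happened instead
 * of going through the real bdev layer.
 */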
bool g_completion_called = false;
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	g_completion_called = true;
}

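/* The accel submit stubs below report success without invoking cb_fn, so
 * compression/decompression completions are not exercised in this file.
 */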
int
spdk_accel_submit_compress_ext(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
			       struct iovec *src_iovs, size_t src_iovcnt,
			       enum spdk_accel_comp_algo algo, uint32_t level,
			       uint32_t *output_size, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
spdk_accel_submit_decompress_ext(struct spdk_io_channel *ch, struct iovec *dst_iovs,
				 size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
				 enum spdk_accel_comp_algo algo, uint32_t *output_size,
				 spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	return 0;
}

/* Global setup shared by all tests. */
static int
test_setup(void)
{
	struct spdk_thread *thread;

	spdk_thread_lib_init(NULL, 0);

	thread = spdk_thread_create(NULL, NULL);
	spdk_set_thread(thread);

	g_comp_bdev.reduce_thread = thread;
	g_comp_bdev.backing_dev.submit_backing_io = _comp_reduce_submit_backing_io;
	g_comp_bdev.backing_dev.compress = _comp_reduce_compress;
	g_comp_bdev.backing_dev.decompress = _comp_reduce_decompress;
	g_comp_bdev.backing_dev.blocklen = 512;
	g_comp_bdev.backing_dev.blockcnt = 1024 * 16;
	g_comp_bdev.backing_dev.sgl_in = true;
	g_comp_bdev.backing_dev.sgl_out = true;

	TAILQ_INIT(&g_comp_bdev.queued_comp_ops);

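	/* Allocate the bdev_io with extra room so driver_ctx can back a comp_bdev_io. */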
	g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct comp_bdev_io));
	g_bdev_io->u.bdev.iovs = calloc(128, sizeof(struct iovec));
	g_bdev_io->bdev = &g_comp_bdev.comp_bdev;
	g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct comp_io_channel));
	g_io_ch->thread = thread;
	g_comp_ch = (struct comp_io_channel *)spdk_io_channel_get_ctx(g_io_ch);
	g_io_ctx = (struct comp_bdev_io *)g_bdev_io->driver_ctx;

	g_io_ctx->comp_ch = g_comp_ch;
	g_io_ctx->comp_bdev = &g_comp_bdev;

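	/* 16K chunks over 512-byte logical blocks (32 blocks per chunk); the unmap
	 * cases in test_vbdev_compress_submit_request() rely on this geometry.
	 */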
	g_vol_params.chunk_size = 16384;
	g_vol_params.logical_block_size = 512;

	return 0;
}

/* Global teardown for all tests */
static int
test_cleanup(void)
{
	struct spdk_thread *thread;

	free(g_bdev_io->u.bdev.iovs);
	free(g_bdev_io);
	free(g_io_ch);

	thread = spdk_get_thread();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	spdk_thread_lib_fini();

	return 0;
}

static void
test_compress_operation(void)
{
}

static void
test_compress_operation_cross_boundary(void)
{
}

static void
test_vbdev_compress_submit_request(void)
{
	/* Single element block size write */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);
	CU_ASSERT(g_io_ctx->orig_io == g_bdev_io);
	CU_ASSERT(g_io_ctx->comp_bdev == &g_comp_bdev);
	CU_ASSERT(g_io_ctx->comp_ch == g_comp_ch);

	/* same write but now fail it */
	ut_spdk_reduce_vol_op_complete_err = 1;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* test a read success */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	ut_spdk_reduce_vol_op_complete_err = 0;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* test a read failure */
	ut_spdk_reduce_vol_op_complete_err = 1;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* Test unmap with io unit = 4K, blocksize = 512, chunksize = 16K.
	 * Each entry is {offset, length} in logical blocks and covers:
	 * one partial chunk; one full chunk; one full chunk plus a partial
	 * tail chunk; one full chunk plus partial head and tail chunks; and
	 * multiple full chunks plus partial head and tail chunks.
	 */
	uint64_t unmap_io_array[][2] = {
		{7, 15},
		{0, 32},
		{0, 47},
		{7, 83},
		{17, 261}
	};
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
	for (uint64_t i = 0; i < SPDK_COUNTOF(unmap_io_array); i++) {
		g_bdev_io->u.bdev.offset_blocks = unmap_io_array[i][0];
		g_bdev_io->u.bdev.num_blocks = unmap_io_array[i][1];
		/* test success */
		ut_spdk_reduce_vol_op_complete_err = 0;
		g_completion_called = false;
		vbdev_compress_submit_request(g_io_ch, g_bdev_io);
		CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
		CU_ASSERT(g_completion_called == true);

		/* test fail */
		ut_spdk_reduce_vol_op_complete_err = 1;
		g_completion_called = false;
		vbdev_compress_submit_request(g_io_ch, g_bdev_io);
		CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
		CU_ASSERT(g_completion_called == true);
	}
}

static void
test_passthru(void)
{
}

static void
test_reset(void)
{
	/* TODO: There are a few different ways to do this given that
	 * the code uses spdk_for_each_channel() to implement reset
	 * handling. Submitting without a UT for this function for now;
	 * will follow up with something shortly.
	 */
}

bool g_comp_base_support_rw = true;

bool
spdk_bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type)
{
	return g_comp_base_support_rw;
}

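/* READ/WRITE support follows the base bdev, UNMAP is always reported as
 * supported, and RESET/FLUSH/WRITE_ZEROES never are.
 */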
static void
test_supported_io(void)
{
	g_comp_base_support_rw = false;
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_READ) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_WRITE) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_UNMAP) == true);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_RESET) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_FLUSH) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES) == false);

	g_comp_base_support_rw = true;
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_READ) == true);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_WRITE) == true);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_UNMAP) == true);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_RESET) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_FLUSH) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES) == false);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("compress", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_compress_operation);
	CU_ADD_TEST(suite, test_compress_operation_cross_boundary);
	CU_ADD_TEST(suite, test_vbdev_compress_submit_request);
	CU_ADD_TEST(suite, test_passthru);
	CU_ADD_TEST(suite, test_supported_io);
	CU_ADD_TEST(suite, test_reset);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}