/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2019 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk_internal/cunit.h"
/* We have our own mock for this */
#define UNIT_TEST_NO_VTOPHYS
#include "common/lib/test_env.c"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"
#include "unit/lib/json_mock.c"
#include "spdk/reduce.h"

/* There will be one mbuf if the data perfectly matches the chunk size,
 * or there could be an offset into the data and a remainder after
 * the data, or both, for a max of 3.
 */
#define UT_MBUFS_PER_OP 3
/* For testing the crossing of a huge page boundary on address translation,
 * we'll have an extra one, but we only test on the source side.
 */
#define UT_MBUFS_PER_OP_BOUND_TEST 4

struct spdk_bdev_io *g_bdev_io;
struct spdk_io_channel *g_io_ch;
struct vbdev_compress g_comp_bdev;
struct comp_bdev_io *g_io_ctx;
struct comp_io_channel *g_comp_ch;

struct spdk_reduce_vol_params g_vol_params;

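/* The reduce volume I/O entry points are mocked here, ahead of the vbdev_compress.c include
 * below, so these definitions are used in place of the libreduce implementations. Each mock
 * simply invokes the completion callback with the configurable error code.
 */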
static int ut_spdk_reduce_vol_op_complete_err = 0;
void
spdk_reduce_vol_writev(struct spdk_reduce_vol *vol, struct iovec *iov, int iovcnt,
		       uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn,
		       void *cb_arg)
{
	cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err);
}

void
spdk_reduce_vol_readv(struct spdk_reduce_vol *vol, struct iovec *iov, int iovcnt,
		      uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn,
		      void *cb_arg)
{
	cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err);
}

void
spdk_reduce_vol_unmap(struct spdk_reduce_vol *vol,
		      uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn,
		      void *cb_arg)
{
	cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err);
	spdk_thread_poll(spdk_get_thread(), 0, 0);
}

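/* Include the implementation under test directly so its static functions are visible to the
 * test and resolve against the mocks and stubs defined in this file.
 */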
#include "bdev/compress/vbdev_compress.c"

/* SPDK stubs */
DEFINE_STUB(spdk_accel_get_opc_module_name, int, (enum spdk_accel_opcode opcode,
		const char **module_name), 0);
DEFINE_STUB(spdk_accel_get_io_channel, struct spdk_io_channel *, (void), (void *)0xfeedbeef);
DEFINE_STUB(spdk_bdev_get_aliases, const struct spdk_bdev_aliases_list *,
	    (const struct spdk_bdev *bdev), NULL);
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
				     void *cb_arg));
DEFINE_STUB(spdk_bdev_open_ext, int, (const char *bdev_name, bool write,
				      spdk_bdev_event_cb_t event_cb,
				      void *event_ctx, struct spdk_bdev_desc **_desc), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_by_name, struct spdk_bdev *, (const char *bdev_name), NULL);
DEFINE_STUB(spdk_bdev_io_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_io *bdev_io),
	    0);
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB_V(spdk_reduce_vol_unload, (struct spdk_reduce_vol *vol,
				       spdk_reduce_vol_op_complete cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_reduce_vol_load, (struct spdk_reduce_backing_dev *backing_dev,
				     spdk_reduce_vol_op_with_handle_complete cb_fn, void *cb_arg));
DEFINE_STUB(spdk_reduce_vol_get_params, const struct spdk_reduce_vol_params *,
	    (struct spdk_reduce_vol *vol), &g_vol_params);
DEFINE_STUB(spdk_reduce_vol_get_pm_path, const char *,
	    (const struct spdk_reduce_vol *vol), NULL);
DEFINE_STUB_V(spdk_reduce_vol_init, (struct spdk_reduce_vol_params *params,
				     struct spdk_reduce_backing_dev *backing_dev,
				     const char *pm_file_dir,
				     spdk_reduce_vol_op_with_handle_complete cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_reduce_vol_destroy, (struct spdk_reduce_backing_dev *backing_dev,
					spdk_reduce_vol_op_complete cb_fn, void *cb_arg));

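/* vtophys mock: once the call counter hits g_small_size_modify, report g_small_size as the
 * translated length instead of the caller's size, then reset both controls. This lets a test
 * make a buffer appear to straddle a huge page boundary so the split-translation path is
 * exercised.
 */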
int g_small_size_counter = 0;
int g_small_size_modify = 0;
uint64_t g_small_size = 0;
uint64_t
spdk_vtophys(const void *buf, uint64_t *size)
{
	g_small_size_counter++;
	if (g_small_size_counter == g_small_size_modify) {
		*size = g_small_size;
		g_small_size_counter = 0;
		g_small_size_modify = 0;
	}
	return (uint64_t)buf;
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	cb(g_io_ch, g_bdev_io, true);
}

/* Mock these functions to call the callback and then return the value we require */
int ut_spdk_bdev_readv_blocks = 0;
int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
	return ut_spdk_bdev_readv_blocks;
}

int ut_spdk_bdev_writev_blocks = 0;
bool ut_spdk_bdev_writev_blocks_mocked = false;
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
	return ut_spdk_bdev_writev_blocks;
}

int ut_spdk_bdev_unmap_blocks = 0;
bool ut_spdk_bdev_unmap_blocks_mocked = false;
int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
	return ut_spdk_bdev_unmap_blocks;
}

int ut_spdk_bdev_flush_blocks = 0;
bool ut_spdk_bdev_flush_blocks_mocked = false;
int
spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		       void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
	return ut_spdk_bdev_flush_blocks;
}

int ut_spdk_bdev_reset = 0;
bool ut_spdk_bdev_reset_mocked = false;
int
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
	return ut_spdk_bdev_reset;
}

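/* Record the completion status directly on the bdev_io and note that completion was requested,
 * rather than routing it through the bdev layer, so tests can assert on both.
 */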
bool g_completion_called = false;
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	g_completion_called = true;
}

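/* Accel framework submissions are stubbed to return success without invoking the completion
 * callback; the compress/decompress operation tests below are currently empty placeholders.
 */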
int
spdk_accel_submit_compress_ext(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
			       struct iovec *src_iovs, size_t src_iovcnt,
			       enum spdk_accel_comp_algo algo, uint32_t level,
			       uint32_t *output_size, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
spdk_accel_submit_decompress_ext(struct spdk_io_channel *ch, struct iovec *dst_iovs,
				 size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
				 enum spdk_accel_comp_algo algo, uint32_t *output_size,
				 spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	return 0;
}

/* Global setup for all tests that share a bunch of preparation... */
static int
test_setup(void)
{
	struct spdk_thread *thread;

	spdk_thread_lib_init(NULL, 0);

	thread = spdk_thread_create(NULL, NULL);
	spdk_set_thread(thread);

	g_comp_bdev.reduce_thread = thread;
	g_comp_bdev.backing_dev.submit_backing_io = _comp_reduce_submit_backing_io;
	g_comp_bdev.backing_dev.compress = _comp_reduce_compress;
	g_comp_bdev.backing_dev.decompress = _comp_reduce_decompress;
	g_comp_bdev.backing_dev.blocklen = 512;
	g_comp_bdev.backing_dev.blockcnt = 1024 * 16;
	g_comp_bdev.backing_dev.sgl_in = true;
	g_comp_bdev.backing_dev.sgl_out = true;

	TAILQ_INIT(&g_comp_bdev.queued_comp_ops);

	g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct comp_bdev_io));
	g_bdev_io->u.bdev.iovs = calloc(128, sizeof(struct iovec));
	g_bdev_io->bdev = &g_comp_bdev.comp_bdev;
	g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct comp_io_channel));
	g_io_ch->thread = thread;
	g_comp_ch = (struct comp_io_channel *)spdk_io_channel_get_ctx(g_io_ch);
	g_io_ctx = (struct comp_bdev_io *)g_bdev_io->driver_ctx;

	g_io_ctx->comp_ch = g_comp_ch;
	g_io_ctx->comp_bdev = &g_comp_bdev;

	g_vol_params.chunk_size = 16384;
	g_vol_params.logical_block_size = 512;

	return 0;
}

/* Global teardown for all tests */
static int
test_cleanup(void)
{
	struct spdk_thread *thread;

	free(g_bdev_io->u.bdev.iovs);
	free(g_bdev_io);
	free(g_io_ch);

	thread = spdk_get_thread();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	spdk_thread_lib_fini();

	return 0;
}

static void
test_compress_operation(void)
{
}

static void
test_compress_operation_cross_boundary(void)
{
}

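/* Drive vbdev_compress_submit_request() for write, read, and unmap I/O, covering both the
 * success and failure completion paths selected via ut_spdk_reduce_vol_op_complete_err.
 */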
static void
test_vbdev_compress_submit_request(void)
{
	/* Single element block size write */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);
	CU_ASSERT(g_io_ctx->orig_io == g_bdev_io);
	CU_ASSERT(g_io_ctx->comp_bdev == &g_comp_bdev);
	CU_ASSERT(g_io_ctx->comp_ch == g_comp_ch);

	/* Same write but now fail it */
	ut_spdk_reduce_vol_op_complete_err = 1;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* Test a read success */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	ut_spdk_reduce_vol_op_complete_err = 0;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* Test a read failure */
	ut_spdk_reduce_vol_op_complete_err = 1;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* Test unmap. With io unit = 4k, block size = 512, and chunk size = 16k there are
	 * 32 blocks per chunk. Each entry below is an {offset, length} pair in blocks, and
	 * the set covers: one partial chunk; one full chunk; one full chunk plus a partial
	 * tail chunk; one full chunk plus partial head and tail chunks; and multiple full
	 * chunks plus two partial chunks.
	 */
	uint64_t unmap_io_array[][2] = {
		{7, 15},
		{0, 32},
		{0, 47},
		{7, 83},
		{17, 261}
	};
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
	for (uint64_t i = 0; i < SPDK_COUNTOF(unmap_io_array); i++) {
		g_bdev_io->u.bdev.offset_blocks = unmap_io_array[i][0];
		g_bdev_io->u.bdev.num_blocks = unmap_io_array[i][1];
		/* Test success */
		ut_spdk_reduce_vol_op_complete_err = 0;
		g_completion_called = false;
		vbdev_compress_submit_request(g_io_ch, g_bdev_io);
		CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
		CU_ASSERT(g_completion_called == true);

		/* Test fail */
		ut_spdk_reduce_vol_op_complete_err = 1;
		g_completion_called = false;
		vbdev_compress_submit_request(g_io_ch, g_bdev_io);
		CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
		CU_ASSERT(g_completion_called == true);
	}
}

static void
test_passthru(void)
{
}

static void
test_reset(void)
{
	/* TODO: There are a few different ways to do this given that
	 * the code uses spdk_for_each_channel() to implement reset
	 * handling. Submitting without a UT for this function for now;
	 * will follow up with something shortly.
	 */
}

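/* Controls whether the mocked base bdev reports read/write support, so the test below can
 * check which I/O types the compress vbdev defers to the base device and which it always
 * (or never) claims to support itself.
 */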
bool g_comp_base_support_rw = true;

bool
spdk_bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type)
{
	return g_comp_base_support_rw;
}

static void
test_supported_io(void)
{
	g_comp_base_support_rw = false;
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_READ) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_WRITE) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_UNMAP) == true);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_RESET) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_FLUSH) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES) == false);

	g_comp_base_support_rw = true;
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_READ) == true);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_WRITE) == true);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_UNMAP) == true);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_RESET) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_FLUSH) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES) == false);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("compress", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_compress_operation);
	CU_ADD_TEST(suite, test_compress_operation_cross_boundary);
	CU_ADD_TEST(suite, test_vbdev_compress_submit_request);
	CU_ADD_TEST(suite, test_passthru);
	CU_ADD_TEST(suite, test_supported_io);
	CU_ADD_TEST(suite, test_reset);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}