/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2019 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk_internal/cunit.h"
/* We have our own mock for this */
#define UNIT_TEST_NO_VTOPHYS
#include "common/lib/test_env.c"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"
#include "unit/lib/json_mock.c"
#include "spdk/reduce.h"

/* There will be one mbuf if the data perfectly matches the chunk size;
 * otherwise there may also be one for an offset into the data and/or one
 * for a remainder after the data, for a max of 3.
 */
#define UT_MBUFS_PER_OP 3
/* For testing the crossing of a huge page boundary on address translation,
 * we'll have one extra mbuf, but we only test this on the source side.
 */
#define UT_MBUFS_PER_OP_BOUND_TEST 4

struct spdk_bdev_io *g_bdev_io;
struct spdk_io_channel *g_io_ch;
struct vbdev_compress g_comp_bdev;
struct comp_bdev_io *g_io_ctx;
struct comp_io_channel *g_comp_ch;

/* Error status injected into the spdk_reduce_vol_readv()/writev() mocks below. */
static int ut_spdk_reduce_vol_op_complete_err = 0;

void
spdk_reduce_vol_writev(struct spdk_reduce_vol *vol, struct iovec *iov, int iovcnt,
		       uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn,
		       void *cb_arg)
{
	cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err);
}

void
spdk_reduce_vol_readv(struct spdk_reduce_vol *vol, struct iovec *iov, int iovcnt,
		      uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn,
		      void *cb_arg)
{
	cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err);
}

#include "bdev/compress/vbdev_compress.c"

/* SPDK stubs */
DEFINE_STUB(spdk_accel_get_opc_module_name, int, (enum spdk_accel_opcode opcode,
		const char **module_name), 0);
DEFINE_STUB(spdk_accel_get_io_channel, struct spdk_io_channel *, (void), (void *)0xfeedbeef);
DEFINE_STUB(spdk_bdev_get_aliases, const struct spdk_bdev_aliases_list *,
	    (const struct spdk_bdev *bdev), NULL);
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
				     void *cb_arg));
DEFINE_STUB(spdk_bdev_open_ext, int, (const char *bdev_name, bool write,
				      spdk_bdev_event_cb_t event_cb,
				      void *event_ctx, struct spdk_bdev_desc **_desc), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_by_name, struct spdk_bdev *, (const char *bdev_name), NULL);
DEFINE_STUB(spdk_bdev_io_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_io *bdev_io),
	    0);
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
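
/*
 * Illustrative sketch only (guarded out, not registered with the suite): the
 * DEFINE_STUB()'d functions above can be re-pointed per test with MOCK_SET()
 * and MOCK_CLEAR() from spdk_internal/mock.h.  For example, a hypothetical
 * test could force spdk_bdev_open_ext() to fail in order to exercise an
 * error path in the vbdev:
 */
#if 0
static void
ut_example_open_failure(void)
{
	MOCK_SET(spdk_bdev_open_ext, -ENODEV);
	/* ... invoke the code under test and CU_ASSERT() that it propagates the error ... */
	MOCK_CLEAR(spdk_bdev_open_ext);
}
#endif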
DEFINE_STUB_V(spdk_reduce_vol_unload, (struct spdk_reduce_vol *vol,
				       spdk_reduce_vol_op_complete cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_reduce_vol_load, (struct spdk_reduce_backing_dev *backing_dev,
				     spdk_reduce_vol_op_with_handle_complete cb_fn, void *cb_arg));
DEFINE_STUB(spdk_reduce_vol_get_params, const struct spdk_reduce_vol_params *,
	    (struct spdk_reduce_vol *vol), NULL);
DEFINE_STUB(spdk_reduce_vol_get_pm_path, const char *,
	    (const struct spdk_reduce_vol *vol), NULL);
DEFINE_STUB_V(spdk_reduce_vol_init, (struct spdk_reduce_vol_params *params,
				     struct spdk_reduce_backing_dev *backing_dev,
				     const char *pm_file_dir,
				     spdk_reduce_vol_op_with_handle_complete cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_reduce_vol_destroy, (struct spdk_reduce_backing_dev *backing_dev,
					spdk_reduce_vol_op_complete cb_fn, void *cb_arg));

int g_small_size_counter = 0;
int g_small_size_modify = 0;
uint64_t g_small_size = 0;
uint64_t
spdk_vtophys(const void *buf, uint64_t *size)
{
	g_small_size_counter++;
	if (g_small_size_counter == g_small_size_modify) {
		*size = g_small_size;
		g_small_size_counter = 0;
		g_small_size_modify = 0;
	}
	return (uint64_t)buf;
}
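
/*
 * Illustrative sketch only (guarded out): a test that wants to simulate a
 * huge page boundary on the source buffer can arm the spdk_vtophys() mock
 * above so that one translation in a request reports a short mapping.  The
 * values below are examples, not what any current test uses.
 */
#if 0
static void
ut_example_arm_vtophys_boundary(void)
{
	g_small_size_modify = 2;	/* trip on the second spdk_vtophys() call */
	g_small_size = 0x800;		/* report only 2 KiB mapped at that address */
	/* ... submit the operation under test; the mock disarms itself after it fires ... */
}
#endif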

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	cb(g_io_ch, g_bdev_io, true);
}

/* Mock these functions to call the callback and then return the value we require */
int ut_spdk_bdev_readv_blocks = 0;
int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
	return ut_spdk_bdev_readv_blocks;
}

int ut_spdk_bdev_writev_blocks = 0;
bool ut_spdk_bdev_writev_blocks_mocked = false;
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
	return ut_spdk_bdev_writev_blocks;
}

int ut_spdk_bdev_unmap_blocks = 0;
bool ut_spdk_bdev_unmap_blocks_mocked = false;
int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
	return ut_spdk_bdev_unmap_blocks;
}

int ut_spdk_bdev_flush_blocks = 0;
bool ut_spdk_bdev_flush_blocks_mocked = false;
int
spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		       void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
	return ut_spdk_bdev_flush_blocks;
}

int ut_spdk_bdev_reset = 0;
bool ut_spdk_bdev_reset_mocked = false;
int
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
	return ut_spdk_bdev_reset;
}

bool g_completion_called = false;
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	g_completion_called = true;
}

int
spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs, size_t dst_iovcnt,
			     struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size,
			     spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	return 0;
}
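
/*
 * Note that the spdk_accel_submit_compress()/spdk_accel_submit_decompress()
 * stubs above return success without ever invoking cb_fn, so the accel
 * completion path is not driven by this suite.  A sketch of a completing
 * variant, mirroring the spdk_reduce_vol_*() mocks at the top of the file,
 * might look like the following (guarded out; the error knob and the
 * output_size value are illustrative assumptions, not the suite's behavior):
 */
#if 0
int ut_spdk_accel_submit_compress = 0;
int
spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	*output_size = (uint32_t)nbytes;	/* pretend the data did not compress */
	cb_fn(cb_arg, ut_spdk_accel_submit_compress);
	return ut_spdk_accel_submit_compress;
}
#endif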

/* Global setup for all tests that share a bunch of preparation... */
static int
test_setup(void)
{
	struct spdk_thread *thread;

	spdk_thread_lib_init(NULL, 0);

	thread = spdk_thread_create(NULL, NULL);
	spdk_set_thread(thread);

	g_comp_bdev.reduce_thread = thread;
	g_comp_bdev.backing_dev.submit_backing_io = _comp_reduce_submit_backing_io;
	g_comp_bdev.backing_dev.compress = _comp_reduce_compress;
	g_comp_bdev.backing_dev.decompress = _comp_reduce_decompress;
	g_comp_bdev.backing_dev.blocklen = 512;
	g_comp_bdev.backing_dev.blockcnt = 1024 * 16;
	g_comp_bdev.backing_dev.sgl_in = true;
	g_comp_bdev.backing_dev.sgl_out = true;

	TAILQ_INIT(&g_comp_bdev.queued_comp_ops);

	g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct comp_bdev_io));
	g_bdev_io->u.bdev.iovs = calloc(128, sizeof(struct iovec));
	g_bdev_io->bdev = &g_comp_bdev.comp_bdev;
	g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct comp_io_channel));
	g_io_ch->thread = thread;
	g_comp_ch = (struct comp_io_channel *)spdk_io_channel_get_ctx(g_io_ch);
	g_io_ctx = (struct comp_bdev_io *)g_bdev_io->driver_ctx;

	g_io_ctx->comp_ch = g_comp_ch;
	g_io_ctx->comp_bdev = &g_comp_bdev;

	return 0;
}

/* Global teardown for all tests */
static int
test_cleanup(void)
{
	struct spdk_thread *thread;

	free(g_bdev_io->u.bdev.iovs);
	free(g_bdev_io);
	free(g_io_ch);

	thread = spdk_get_thread();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	spdk_thread_lib_fini();

	return 0;
}

static void
test_compress_operation(void)
{
}

static void
test_compress_operation_cross_boundary(void)
{
}

static void
test_vbdev_compress_submit_request(void)
{
	/* Single element block size write */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);
	CU_ASSERT(g_io_ctx->orig_io == g_bdev_io);
	CU_ASSERT(g_io_ctx->comp_bdev == &g_comp_bdev);
	CU_ASSERT(g_io_ctx->comp_ch == g_comp_ch);

	/* same write but now fail it */
	ut_spdk_reduce_vol_op_complete_err = 1;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* test a read success */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	ut_spdk_reduce_vol_op_complete_err = 0;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* test a read failure */
	ut_spdk_reduce_vol_op_complete_err = 1;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);
}

static void
test_passthru(void)
{
}

static void
test_reset(void)
{
	/* TODO: There are a few different ways to do this given that
	 * the code uses spdk_for_each_channel() to implement reset
	 * handling. Submitting without a UT for this function for now;
	 * will follow up with something shortly.
	 */
}

bool g_comp_base_support_rw = true;

bool
spdk_bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type)
{
	return g_comp_base_support_rw;
}

static void
test_supported_io(void)
{
	g_comp_base_support_rw = false;
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_READ) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_WRITE) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_UNMAP) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_RESET) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_FLUSH) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES) == false);

	g_comp_base_support_rw = true;
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_READ) == true);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_WRITE) == true);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_UNMAP) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_RESET) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_FLUSH) == false);
	CU_ASSERT(vbdev_compress_io_type_supported(&g_comp_bdev, SPDK_BDEV_IO_TYPE_WRITE_ZEROES) == false);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("compress", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_compress_operation);
	CU_ADD_TEST(suite, test_compress_operation_cross_boundary);
	CU_ADD_TEST(suite, test_vbdev_compress_submit_request);
	CU_ADD_TEST(suite, test_passthru);
	CU_ADD_TEST(suite, test_supported_io);
	CU_ADD_TEST(suite, test_reset);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}
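
/*
 * Usage note: this suite is normally built and run alongside the rest of the
 * SPDK unit tests (for example via test/unit/unittest.sh, though the exact
 * invocation may differ between SPDK versions).  spdk_ut_run_tests() returns
 * the number of CUnit failures, which main() passes back as the exit code.
 */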