/* SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2019 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk_internal/cunit.h"
/* We have our own mock for this */
#define UNIT_TEST_NO_VTOPHYS
#include "common/lib/test_env.c"
#include "spdk_internal/mock.h"
#include "thread/thread_internal.h"
#include "unit/lib/json_mock.c"
#include "spdk/reduce.h"

/* There will be one if the data perfectly matches the chunk size,
 * or there could be an offset into the data and a remainder after
 * the data or both for a max of 3.
 */
#define UT_MBUFS_PER_OP 3
/* For testing the crossing of a huge page boundary on address translation,
 * we'll have an extra one but we only test on the source side.
 */
#define UT_MBUFS_PER_OP_BOUND_TEST 4

struct spdk_bdev_io *g_bdev_io;
struct spdk_io_channel *g_io_ch;
struct vbdev_compress g_comp_bdev;
struct comp_bdev_io *g_io_ctx;
struct comp_io_channel *g_comp_ch;

static int ut_spdk_reduce_vol_op_complete_err = 0;
void
spdk_reduce_vol_writev(struct spdk_reduce_vol *vol, struct iovec *iov, int iovcnt,
		       uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn,
		       void *cb_arg)
{
	cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err);
}

void
spdk_reduce_vol_readv(struct spdk_reduce_vol *vol, struct iovec *iov, int iovcnt,
		      uint64_t offset, uint64_t length, spdk_reduce_vol_op_complete cb_fn,
		      void *cb_arg)
{
	cb_fn(cb_arg, ut_spdk_reduce_vol_op_complete_err);
}
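/* The two mocks above stand in for the real reduce volume I/O entry points:
 * they complete synchronously and report whatever status has been injected
 * via ut_spdk_reduce_vol_op_complete_err (0 = success). The submit_request
 * test below toggles that value to drive both the success and failure paths.
 */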
#include "bdev/compress/vbdev_compress.c"

/* SPDK stubs */
DEFINE_STUB(spdk_accel_get_opc_module_name, int, (enum spdk_accel_opcode opcode,
		const char **module_name), 0);
DEFINE_STUB(spdk_accel_get_io_channel, struct spdk_io_channel *, (void), (void *)0xfeedbeef);
DEFINE_STUB(spdk_bdev_get_aliases, const struct spdk_bdev_aliases_list *,
	    (const struct spdk_bdev *bdev), NULL);
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
		enum spdk_bdev_io_type io_type), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
				     void *cb_arg));
DEFINE_STUB(spdk_bdev_open_ext, int, (const char *bdev_name, bool write,
				      spdk_bdev_event_cb_t event_cb,
				      void *event_ctx, struct spdk_bdev_desc **_desc), 0);
DEFINE_STUB(spdk_bdev_desc_get_bdev, struct spdk_bdev *, (struct spdk_bdev_desc *desc), NULL);
DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
		struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
DEFINE_STUB(spdk_bdev_register, int, (struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_by_name, struct spdk_bdev *, (const char *bdev_name), NULL);
DEFINE_STUB(spdk_bdev_io_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_io *bdev_io),
	    0);
DEFINE_STUB(spdk_bdev_queue_io_wait, int, (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
		struct spdk_bdev_io_wait_entry *entry), 0);
DEFINE_STUB_V(spdk_reduce_vol_unload, (struct spdk_reduce_vol *vol,
				       spdk_reduce_vol_op_complete cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_reduce_vol_load, (struct spdk_reduce_backing_dev *backing_dev,
				     spdk_reduce_vol_op_with_handle_complete cb_fn, void *cb_arg));
DEFINE_STUB(spdk_reduce_vol_get_params, const struct spdk_reduce_vol_params *,
	    (struct spdk_reduce_vol *vol), NULL);
DEFINE_STUB_V(spdk_reduce_vol_init, (struct spdk_reduce_vol_params *params,
				     struct spdk_reduce_backing_dev *backing_dev,
				     const char *pm_file_dir,
				     spdk_reduce_vol_op_with_handle_complete cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_reduce_vol_destroy, (struct spdk_reduce_backing_dev *backing_dev,
					spdk_reduce_vol_op_complete cb_fn, void *cb_arg));

int g_small_size_counter = 0;
int g_small_size_modify = 0;
uint64_t g_small_size = 0;
/* Mock for the vtophys translation used by the boundary-crossing test: each call
 * bumps g_small_size_counter and, on the call selected by g_small_size_modify,
 * reports only g_small_size bytes as translatable so the caller has to split
 * the buffer (e.g. at a huge page boundary).
 */
uint64_t
spdk_vtophys(const void *buf, uint64_t *size)
{
	g_small_size_counter++;
	if (g_small_size_counter == g_small_size_modify) {
		*size = g_small_size;
		g_small_size_counter = 0;
		g_small_size_modify = 0;
	}
	return (uint64_t)buf;
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	cb(g_io_ch, g_bdev_io, true);
}

/* Mock these functions to call the callback and then return the value we require */
int ut_spdk_bdev_readv_blocks = 0;
int
spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       struct iovec *iov, int iovcnt,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
	return ut_spdk_bdev_readv_blocks;
}

int ut_spdk_bdev_writev_blocks = 0;
bool ut_spdk_bdev_writev_blocks_mocked = false;
int
spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			struct iovec *iov, int iovcnt,
			uint64_t offset_blocks, uint64_t num_blocks,
			spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
	return ut_spdk_bdev_writev_blocks;
}

int ut_spdk_bdev_unmap_blocks = 0;
bool ut_spdk_bdev_unmap_blocks_mocked = false;
int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
	return ut_spdk_bdev_unmap_blocks;
}

int ut_spdk_bdev_flush_blocks = 0;
bool ut_spdk_bdev_flush_blocks_mocked = false;
int
spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
		       void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
	return ut_spdk_bdev_flush_blocks;
}

int ut_spdk_bdev_reset = 0;
bool ut_spdk_bdev_reset_mocked = false;
int
spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
	return ut_spdk_bdev_reset;
}
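/* A hypothetical usage sketch (not part of the current suite): each ut_spdk_bdev_*
 * variable above doubles as both the mocked completion status and the mocked
 * submit return code, so a test that wants a base-bdev path to fail could do:
 *
 *	ut_spdk_bdev_writev_blocks = -ENOMEM;	// mocked submit fails, completion reports error
 *	... exercise the vbdev code under test ...
 *	ut_spdk_bdev_writev_blocks = 0;		// restore the default success behavior
 */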
bool g_completion_called = false;
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	g_completion_called = true;
}

int
spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size,
			   int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs, size_t dst_iovcnt,
			     struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size,
			     int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	return 0;
}
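/* Unlike the bdev I/O mocks above, these accel stubs do not invoke cb_fn, so no
 * compress/decompress completion is ever delivered in this suite; accordingly,
 * the compress-operation tests below are currently empty.
 */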
/* Global setup for all tests that share a bunch of preparation... */
static int
test_setup(void)
{
	struct spdk_thread *thread;

	spdk_thread_lib_init(NULL, 0);

	thread = spdk_thread_create(NULL, NULL);
	spdk_set_thread(thread);

	g_comp_bdev.reduce_thread = thread;
	g_comp_bdev.backing_dev.unmap = _comp_reduce_unmap;
	g_comp_bdev.backing_dev.readv = _comp_reduce_readv;
	g_comp_bdev.backing_dev.writev = _comp_reduce_writev;
	g_comp_bdev.backing_dev.compress = _comp_reduce_compress;
	g_comp_bdev.backing_dev.decompress = _comp_reduce_decompress;
	g_comp_bdev.backing_dev.blocklen = 512;
	g_comp_bdev.backing_dev.blockcnt = 1024 * 16;
	g_comp_bdev.backing_dev.sgl_in = true;
	g_comp_bdev.backing_dev.sgl_out = true;

	TAILQ_INIT(&g_comp_bdev.queued_comp_ops);

	g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct comp_bdev_io));
	g_bdev_io->u.bdev.iovs = calloc(128, sizeof(struct iovec));
	g_bdev_io->bdev = &g_comp_bdev.comp_bdev;
	g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct comp_io_channel));
	g_io_ch->thread = thread;
	g_comp_ch = (struct comp_io_channel *)spdk_io_channel_get_ctx(g_io_ch);
	g_io_ctx = (struct comp_bdev_io *)g_bdev_io->driver_ctx;

	g_io_ctx->comp_ch = g_comp_ch;
	g_io_ctx->comp_bdev = &g_comp_bdev;

	return 0;
}

/* Global teardown for all tests */
static int
test_cleanup(void)
{
	struct spdk_thread *thread;

	free(g_bdev_io->u.bdev.iovs);
	free(g_bdev_io);
	free(g_io_ch);

	thread = spdk_get_thread();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);

	spdk_thread_lib_fini();

	return 0;
}

static void
test_compress_operation(void)
{
}

static void
test_compress_operation_cross_boundary(void)
{
}

static void
test_vbdev_compress_submit_request(void)
{
	/* Single element block size write */
	g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);
	CU_ASSERT(g_io_ctx->orig_io == g_bdev_io);
	CU_ASSERT(g_io_ctx->comp_bdev == &g_comp_bdev);
	CU_ASSERT(g_io_ctx->comp_ch == g_comp_ch);

	/* same write but now fail it */
	ut_spdk_reduce_vol_op_complete_err = 1;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);

	/* test a read success */
	g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
	ut_spdk_reduce_vol_op_complete_err = 0;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(g_completion_called == true);

	/* test a read failure */
	ut_spdk_reduce_vol_op_complete_err = 1;
	g_completion_called = false;
	vbdev_compress_submit_request(g_io_ch, g_bdev_io);
	CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(g_completion_called == true);
}

static void
test_passthru(void)
{
}

static void
test_reset(void)
{
	/* TODO: There are a few different ways to do this given that
	 * the code uses spdk_for_each_channel() to implement reset
	 * handling. Submitting w/o UT for this function for now and
	 * will follow up with something shortly.
	 */
}

static void
test_supported_io(void)
{
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("compress", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_compress_operation);
	CU_ADD_TEST(suite, test_compress_operation_cross_boundary);
	CU_ADD_TEST(suite, test_vbdev_compress_submit_request);
	CU_ADD_TEST(suite, test_passthru);
	CU_ADD_TEST(suite, test_supported_io);
	CU_ADD_TEST(suite, test_reset);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}