/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation.
 * All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/env.h"

#include "common/lib/ut_multithread.c"

#include "bdev/raid/concat.c"
#include "../common.c"
DEFINE_STUB(spdk_bdev_readv_blocks_with_md, int, (struct spdk_bdev_desc *desc,
            struct spdk_io_channel *ch,
            struct iovec *iov, int iovcnt, void *md,
            uint64_t offset_blocks, uint64_t num_blocks,
            spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_writev_blocks_with_md, int, (struct spdk_bdev_desc *desc,
            struct spdk_io_channel *ch,
            struct iovec *iov, int iovcnt, void *md,
            uint64_t offset_blocks, uint64_t num_blocks,
            spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(raid_bdev_remap_dix_reftag, int, (void *md_buf, uint64_t num_blocks,
            struct spdk_bdev *bdev, uint32_t remapped_offset), -1);

#define BLOCK_LEN (4096)

enum CONCAT_IO_TYPE {
    CONCAT_NONE = 0,
    CONCAT_WRITEV,
    CONCAT_READV,
    CONCAT_FLUSH,
    CONCAT_UNMAP,
};

#define MAX_RECORDS (10)
/*
 * Stores information about the I/O requests sent to the underlying bdevs.
 * A single null-payload request to the concat bdev may fan out into multiple
 * requests to the underlying bdevs, so the request information is recorded
 * in arrays.
 */
struct req_records {
    uint64_t offset_blocks[MAX_RECORDS];
    uint64_t num_blocks[MAX_RECORDS];
    enum CONCAT_IO_TYPE io_type[MAX_RECORDS];
    int count;
    void *md;
} g_req_records;

/*
 * When g_succeed is true, the spdk_bdev_readv/writev/unmap/flush_blocks
 * stubs return 0; when it is false, they return -ENOMEM.
 * g_succeed is always set to false before an I/O request is submitted, so
 * the first submission fails with -ENOMEM. The raid_bdev_queue_io_wait stub
 * then sets g_succeed to true and re-submits the request, which succeeds
 * on the second attempt.
 */
bool g_succeed;

DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

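/*
 * Stubs for the base bdev submission functions. When g_succeed is true,
 * each stub records the request in g_req_records, completes it by invoking
 * the completion callback inline, and returns 0; otherwise it returns
 * -ENOMEM to exercise the queue-io-wait retry path.
 */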
int
spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
                           struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
                           spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_bdev_ext_io_opts *opts)
{
    if (g_succeed) {
        int i = g_req_records.count;

        g_req_records.offset_blocks[i] = offset_blocks;
        g_req_records.num_blocks[i] = num_blocks;
        g_req_records.io_type[i] = CONCAT_READV;
        g_req_records.count++;
        cb(NULL, true, cb_arg);
        g_req_records.md = opts->metadata;
        return 0;
    } else {
        return -ENOMEM;
    }
}

int
spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
                            struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
                            spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_bdev_ext_io_opts *opts)
{
    if (g_succeed) {
        int i = g_req_records.count;

        g_req_records.offset_blocks[i] = offset_blocks;
        g_req_records.num_blocks[i] = num_blocks;
        g_req_records.io_type[i] = CONCAT_WRITEV;
        g_req_records.count++;
        cb(NULL, true, cb_arg);
        g_req_records.md = opts->metadata;
        return 0;
    } else {
        return -ENOMEM;
    }
}

int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
                       uint64_t offset_blocks, uint64_t num_blocks,
                       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
    if (g_succeed) {
        int i = g_req_records.count;

        g_req_records.offset_blocks[i] = offset_blocks;
        g_req_records.num_blocks[i] = num_blocks;
        g_req_records.io_type[i] = CONCAT_UNMAP;
        g_req_records.count++;
        cb(NULL, true, cb_arg);
        return 0;
    } else {
        return -ENOMEM;
    }
}

int
spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
                       uint64_t offset_blocks, uint64_t num_blocks,
                       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
    if (g_succeed) {
        int i = g_req_records.count;

        g_req_records.offset_blocks[i] = offset_blocks;
        g_req_records.num_blocks[i] = num_blocks;
        g_req_records.io_type[i] = CONCAT_FLUSH;
        g_req_records.count++;
        cb(NULL, true, cb_arg);
        return 0;
    } else {
        return -ENOMEM;
    }
}

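/*
 * Stub of raid_bdev_queue_io_wait(): instead of actually queueing the
 * request, flip g_succeed to true and invoke the retry callback right away,
 * so the re-submitted I/O succeeds on its second attempt.
 */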
void
raid_bdev_queue_io_wait(struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
                        struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn)
{
    g_succeed = true;
    cb_fn(raid_io);
}

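/* Every I/O submitted by these tests is expected to complete successfully. */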
void
raid_test_bdev_io_complete(struct raid_bdev_io *raid_io, enum spdk_bdev_io_status status)
{
    CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
}

static void
init_globals(void)
{
    int i;

    for (i = 0; i < MAX_RECORDS; i++) {
        g_req_records.offset_blocks[i] = 0;
        g_req_records.num_blocks[i] = 0;
        g_req_records.io_type[i] = CONCAT_NONE;
    }
    g_req_records.count = 0;
    g_succeed = false;
}

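/*
 * Build the test parameter set as the cross product of the four value arrays
 * below, skipping combinations where the strip size rounds down to 0 or
 * exceeds the base bdev block count.
 */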
static int
test_setup(void)
{
    uint8_t num_base_bdevs_values[] = { 3, 4, 5 };
    uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
    uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
    uint32_t strip_size_kb_values[] = { 1, 4, 128 };
    uint8_t *num_base_bdevs;
    uint64_t *base_bdev_blockcnt;
    uint32_t *base_bdev_blocklen;
    uint32_t *strip_size_kb;
    uint64_t params_count;
    int rc;

    params_count = SPDK_COUNTOF(num_base_bdevs_values) *
                   SPDK_COUNTOF(base_bdev_blockcnt_values) *
                   SPDK_COUNTOF(base_bdev_blocklen_values) *
                   SPDK_COUNTOF(strip_size_kb_values);
    rc = raid_test_params_alloc(params_count);
    if (rc) {
        return rc;
    }

    ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
        ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
            ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
                ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) {
                    struct raid_params params = {
                        .num_base_bdevs = *num_base_bdevs,
                        .base_bdev_blockcnt = *base_bdev_blockcnt,
                        .base_bdev_blocklen = *base_bdev_blocklen,
                        .strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen,
                    };
                    if (params.strip_size == 0 ||
                        params.strip_size > params.base_bdev_blockcnt) {
                        continue;
                    }
                    raid_test_params_add(&params);
                }
            }
        }
    }

    return 0;
}

static int
test_cleanup(void)
{
    raid_test_params_free();
    return 0;
}

static struct raid_bdev *
create_concat(struct raid_params *params)
{
    struct raid_bdev *raid_bdev = raid_test_create_raid_bdev(params, &g_concat_module);

    CU_ASSERT(concat_start(raid_bdev) == 0);
    return raid_bdev;
}

static void
delete_concat(struct raid_bdev *raid_bdev)
{
    concat_stop(raid_bdev);
    raid_test_delete_raid_bdev(raid_bdev);
}

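/*
 * Verify that concat_start() lays the base bdevs out back to back: each
 * block_range entry must begin where the previous one ended. For example,
 * with 3 base bdevs of 1024 blocks each, the expected ranges start at
 * 0, 1024, and 2048, each 1024 blocks long.
 */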
static void
test_concat_start(void)
{
    struct raid_bdev *raid_bdev;
    struct raid_params *params;
    struct concat_block_range *block_range;
    uint64_t total_blockcnt;
    int i;

    RAID_PARAMS_FOR_EACH(params) {
        raid_bdev = create_concat(params);
        block_range = raid_bdev->module_private;
        total_blockcnt = 0;
        for (i = 0; i < params->num_base_bdevs; i++) {
            CU_ASSERT(block_range[i].start == total_blockcnt);
            CU_ASSERT(block_range[i].length == params->base_bdev_blockcnt);
            total_blockcnt += params->base_bdev_blockcnt;
        }
        delete_concat(raid_bdev);
    }
}

static void
raid_io_cleanup(struct raid_bdev_io *raid_io)
{
    if (raid_io->iovs) {
        free(raid_io->iovs->iov_base);
        free(raid_io->iovs);
    }

    free(raid_io);
}

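/*
 * For read/write I/O, a magic metadata pointer (0xAEDFEBAC) is attached so
 * the submission stubs can verify that the metadata buffer is passed through
 * unchanged to the base bdev request.
 */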
static void
raid_io_initialize(struct raid_bdev_io *raid_io, struct raid_bdev_io_channel *raid_ch,
                   struct raid_bdev *raid_bdev, uint64_t lba, uint64_t blocks, int16_t iotype)
{
    struct iovec *iovs;
    int iovcnt;
    void *md_buf;

    if (iotype == SPDK_BDEV_IO_TYPE_UNMAP || iotype == SPDK_BDEV_IO_TYPE_FLUSH) {
        iovs = NULL;
        iovcnt = 0;
        md_buf = NULL;
    } else {
        iovcnt = 1;
        iovs = calloc(iovcnt, sizeof(struct iovec));
        SPDK_CU_ASSERT_FATAL(iovs != NULL);
        /* Size the buffer from the requested block count; raid_io->num_blocks
         * is not initialized until raid_test_bdev_io_init() below. */
        iovs->iov_len = blocks * BLOCK_LEN;
        iovs->iov_base = calloc(1, iovs->iov_len);
        SPDK_CU_ASSERT_FATAL(iovs->iov_base != NULL);
        md_buf = (void *)0xAEDFEBAC;
    }

    raid_test_bdev_io_init(raid_io, raid_bdev, raid_ch, iotype, lba, blocks, iovs, iovcnt, md_buf);
}

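/*
 * Submit a single-block read or write at the first LBA of each base bdev in
 * turn and verify that exactly one request reaches the base bdev layer, at
 * offset 0 and with the metadata pointer intact.
 */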
static void
submit_and_verify_rw(enum CONCAT_IO_TYPE io_type, struct raid_params *params)
{
    struct raid_bdev *raid_bdev;
    struct raid_bdev_io *raid_io;
    struct raid_bdev_io_channel *raid_ch;
    uint64_t lba, blocks;
    int i;

    lba = 0;
    blocks = 1;
    for (i = 0; i < params->num_base_bdevs; i++) {
        init_globals();
        raid_bdev = create_concat(params);
        raid_io = calloc(1, sizeof(*raid_io));
        SPDK_CU_ASSERT_FATAL(raid_io != NULL);
        raid_ch = raid_test_create_io_channel(raid_bdev);

        switch (io_type) {
        case CONCAT_WRITEV:
            raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, blocks, SPDK_BDEV_IO_TYPE_WRITE);
            concat_submit_rw_request(raid_io);
            break;
        case CONCAT_READV:
            raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, blocks, SPDK_BDEV_IO_TYPE_READ);
            concat_submit_rw_request(raid_io);
            break;
        case CONCAT_UNMAP:
            raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, blocks, SPDK_BDEV_IO_TYPE_UNMAP);
            concat_submit_null_payload_request(raid_io);
            break;
        case CONCAT_FLUSH:
            raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, blocks, SPDK_BDEV_IO_TYPE_FLUSH);
            concat_submit_null_payload_request(raid_io);
            break;
        default:
            CU_ASSERT(false);
        }

        /*
         * The request targets the first LBA of each underlying device,
         * so the offset on the underlying device should always be 0.
         */
        CU_ASSERT(g_req_records.offset_blocks[0] == 0);
        CU_ASSERT(g_req_records.num_blocks[0] == blocks);
        CU_ASSERT(g_req_records.io_type[0] == io_type);
        CU_ASSERT(g_req_records.count == 1);
        CU_ASSERT(g_req_records.md == (void *)0xAEDFEBAC);
        raid_io_cleanup(raid_io);
        raid_test_destroy_io_channel(raid_ch);
        delete_concat(raid_bdev);
        lba += params->base_bdev_blockcnt;
    }
}

static void
test_concat_rw(void)
{
    struct raid_params *params;
    enum CONCAT_IO_TYPE io_type_list[] = {CONCAT_WRITEV, CONCAT_READV};
    enum CONCAT_IO_TYPE io_type;
    int i;

    RAID_PARAMS_FOR_EACH(params) {
        for (i = 0; i < 2; i++) {
            io_type = io_type_list[i];
            submit_and_verify_rw(io_type, params);
        }
    }
}

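/*
 * Submit an unmap/flush that crosses a base bdev boundary and verify how
 * concat splits it. For example, with base_bdev_blockcnt == 1024 the request
 * (lba = 1025, blocks = 1025) covers blocks 1..1023 of the second base bdev
 * (offset 1, 1023 blocks) and blocks 0..1 of the third (offset 0, 2 blocks).
 */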
static void
submit_and_verify_null_payload(enum CONCAT_IO_TYPE io_type, struct raid_params *params)
{
    struct raid_bdev *raid_bdev;
    struct raid_bdev_io *raid_io;
    struct raid_bdev_io_channel *raid_ch;
    uint64_t lba, blocks;

    /*
     * In this unit test, all base bdevs have the same blockcnt.
     * If base_bdev_blockcnt > 1, the request starts on the second bdev
     * and spans two bdevs.
     * If base_bdev_blockcnt == 1, the request starts on the third bdev.
     * In that case, with only 3 base bdevs, blocks cannot be set to
     * base_bdev_blockcnt + 1 because the request would run past the end
     * of the last bdev, so blocks is set to 1.
     */
    lba = params->base_bdev_blockcnt + 1;
    if (params->base_bdev_blockcnt == 1 && params->num_base_bdevs == 3) {
        blocks = 1;
    } else {
        blocks = params->base_bdev_blockcnt + 1;
    }
    init_globals();
    raid_bdev = create_concat(params);
    raid_io = calloc(1, sizeof(*raid_io));
    SPDK_CU_ASSERT_FATAL(raid_io != NULL);
    raid_ch = raid_test_create_io_channel(raid_bdev);

    switch (io_type) {
    case CONCAT_UNMAP:
        raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, blocks, SPDK_BDEV_IO_TYPE_UNMAP);
        concat_submit_null_payload_request(raid_io);
        break;
    case CONCAT_FLUSH:
        raid_io_initialize(raid_io, raid_ch, raid_bdev, lba, blocks, SPDK_BDEV_IO_TYPE_FLUSH);
        concat_submit_null_payload_request(raid_io);
        break;
    default:
        CU_ASSERT(false);
    }

    if (params->base_bdev_blockcnt == 1) {
        if (params->num_base_bdevs == 3) {
            CU_ASSERT(g_req_records.count == 1);
            CU_ASSERT(g_req_records.offset_blocks[0] == 0);
            CU_ASSERT(g_req_records.num_blocks[0] == 1);
        } else {
            CU_ASSERT(g_req_records.count == 2);
            CU_ASSERT(g_req_records.offset_blocks[0] == 0);
            CU_ASSERT(g_req_records.num_blocks[0] == 1);
            CU_ASSERT(g_req_records.io_type[0] == io_type);
            CU_ASSERT(g_req_records.offset_blocks[1] == 0);
            CU_ASSERT(g_req_records.num_blocks[1] == 1);
            CU_ASSERT(g_req_records.io_type[1] == io_type);
        }
    } else {
        CU_ASSERT(g_req_records.count == 2);
        CU_ASSERT(g_req_records.offset_blocks[0] == 1);
        CU_ASSERT(g_req_records.num_blocks[0] == params->base_bdev_blockcnt - 1);
        CU_ASSERT(g_req_records.io_type[0] == io_type);
        CU_ASSERT(g_req_records.offset_blocks[1] == 0);
        CU_ASSERT(g_req_records.num_blocks[1] == 2);
        CU_ASSERT(g_req_records.io_type[1] == io_type);
    }
    raid_io_cleanup(raid_io);
    raid_test_destroy_io_channel(raid_ch);
    delete_concat(raid_bdev);
}

static void
test_concat_null_payload(void)
{
    struct raid_params *params;
    enum CONCAT_IO_TYPE io_type_list[] = {CONCAT_FLUSH, CONCAT_UNMAP};
    enum CONCAT_IO_TYPE io_type;
    int i;

    RAID_PARAMS_FOR_EACH(params) {
        for (i = 0; i < 2; i++) {
            io_type = io_type_list[i];
            submit_and_verify_null_payload(io_type, params);
        }
    }
}

int
main(int argc, char **argv)
{
    CU_pSuite suite = NULL;
    unsigned int num_failures;

    CU_initialize_registry();

    suite = CU_add_suite("concat", test_setup, test_cleanup);
    CU_ADD_TEST(suite, test_concat_start);
    CU_ADD_TEST(suite, test_concat_rw);
    CU_ADD_TEST(suite, test_concat_null_payload);

    allocate_threads(1);
    set_thread(0);

    num_failures = spdk_ut_run_tests(argc, argv, NULL);
    CU_cleanup_registry();

    free_threads();

    return num_failures;
}