xref: /spdk/test/unit/lib/bdev/raid/concat.c/concat_ut.c (revision 784b9d48746955f210926648a0131f84f58de76f)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2022 Intel Corporation.
3  *   All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 #include "spdk_cunit.h"
8 #include "spdk/env.h"
9 #include "thread/thread_internal.h"
10 #include "spdk_internal/mock.h"
11 
12 #include "bdev/raid/bdev_raid.h"
13 #include "bdev/raid/concat.c"
14 
15 #define BLOCK_LEN (4096)
16 
/* Identifies which stubbed bdev entry point recorded a given request. */
enum CONCAT_IO_TYPE {
	CONCAT_NONE = 0,
	CONCAT_WRITEV,
	CONCAT_READV,
	CONCAT_FLUSH,
	CONCAT_UNMAP,
};
24 
/*
 * Minimal local definition of the otherwise-opaque descriptor type so the
 * test can allocate descriptors without the full bdev implementation.
 */
struct spdk_bdev_desc {
	struct spdk_bdev *bdev;
};
28 
29 #define MAX_RECORDS (10)
30 /*
31  * Store the information of io requests sent to the underlying bdevs.
32  * For a single null payload request to the concat bdev,
33  * we may send multiple requests to the underling bdevs,
34  * so we store the io request information to arrays.
35  */
struct req_records {
	uint64_t offset_blocks[MAX_RECORDS];	/* start block of each recorded request */
	uint64_t num_blocks[MAX_RECORDS];	/* block count of each recorded request */
	enum CONCAT_IO_TYPE io_type[MAX_RECORDS];	/* which stub handled each request */
	int count;	/* number of valid entries in the arrays above */
	void *md;	/* metadata pointer from the most recent ext request's opts */
} g_req_records;
43 
44 /*
45  * g_succeed is true means the spdk_bdev_readv/writev/unmap/flush_blocks
46  * functions will return 0.
47  * g_succeed is false means the spdk_bdev_readv/writev/unmap/flush_blocks
48  * functions will return -ENOMEM.
49  * We always set it to false before an IO request, then the raid_bdev_queue_io_wait
50  * function will re-submit the request, and the raid_bdev_queue_io_wait function will
51  * set g_succeed to true, then the IO will succeed next time.
52  */
53 bool g_succeed;
54 
55 DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
56 DEFINE_STUB_V(raid_bdev_io_complete, (struct raid_bdev_io *raid_io,
57 				      enum spdk_bdev_io_status status));
58 DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
59 DEFINE_STUB(raid_bdev_io_complete_part, bool,
60 	    (struct raid_bdev_io *raid_io, uint64_t completed,
61 	     enum spdk_bdev_io_status status),
62 	    true);
63 
64 int
65 spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
66 			   struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
67 			   spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_bdev_ext_io_opts *opts)
68 {
69 	if (g_succeed) {
70 		int i = g_req_records.count;
71 
72 		g_req_records.offset_blocks[i] = offset_blocks;
73 		g_req_records.num_blocks[i] = num_blocks;
74 		g_req_records.io_type[i] = CONCAT_READV;
75 		g_req_records.count++;
76 		cb(NULL, true, cb_arg);
77 		g_req_records.md = opts->metadata;
78 		return 0;
79 	} else {
80 		return -ENOMEM;
81 	}
82 }
83 
84 int
85 spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
86 			    struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
87 			    spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_bdev_ext_io_opts *opts)
88 {
89 	if (g_succeed) {
90 		int i = g_req_records.count;
91 
92 		g_req_records.offset_blocks[i] = offset_blocks;
93 		g_req_records.num_blocks[i] = num_blocks;
94 		g_req_records.io_type[i] = CONCAT_WRITEV;
95 		g_req_records.count++;
96 		cb(NULL, true, cb_arg);
97 		g_req_records.md = opts->metadata;
98 		return 0;
99 	} else {
100 		return -ENOMEM;
101 	}
102 }
103 
104 int
105 spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
106 		       uint64_t offset_blocks, uint64_t num_blocks,
107 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
108 {
109 	if (g_succeed) {
110 		int i = g_req_records.count;
111 
112 		g_req_records.offset_blocks[i] = offset_blocks;
113 		g_req_records.num_blocks[i] = num_blocks;
114 		g_req_records.io_type[i] = CONCAT_UNMAP;
115 		g_req_records.count++;
116 		cb(NULL, true, cb_arg);
117 		return 0;
118 	} else {
119 		return -ENOMEM;
120 	}
121 }
122 
123 int
124 spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
125 		       uint64_t offset_blocks, uint64_t num_blocks,
126 		       spdk_bdev_io_completion_cb cb, void *cb_arg)
127 {
128 	if (g_succeed) {
129 		int i = g_req_records.count;
130 
131 		g_req_records.offset_blocks[i] = offset_blocks;
132 		g_req_records.num_blocks[i] = num_blocks;
133 		g_req_records.io_type[i] = CONCAT_FLUSH;
134 		g_req_records.count++;
135 		cb(NULL, true, cb_arg);
136 		return 0;
137 	} else {
138 		return -ENOMEM;
139 	}
140 }
141 
/*
 * Stub for raid_bdev_queue_io_wait(). Simulates the queued IO becoming
 * submittable: flips g_succeed so the next stubbed submission succeeds,
 * then immediately invokes the wait callback to retry the request.
 */
void
raid_bdev_queue_io_wait(struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn)
{
	g_succeed = true;
	cb_fn(raid_io);
}
149 
150 static void
151 init_globals(void)
152 {
153 	int i;
154 
155 	for (i = 0; i < MAX_RECORDS; i++) {
156 		g_req_records.offset_blocks[i] = 0;
157 		g_req_records.num_blocks[i] = 0;
158 		g_req_records.io_type[i] = CONCAT_NONE;
159 	}
160 	g_req_records.count = 0;
161 	g_succeed = false;
162 }
163 
/* One combination of raid geometry values; every test runs over all of them. */
struct concat_params {
	uint8_t num_base_bdevs;
	uint64_t base_bdev_blockcnt;	/* blocks per base bdev (all bdevs identical) */
	uint32_t base_bdev_blocklen;	/* block size in bytes */
	uint32_t strip_size;		/* strip size in blocks */
};
170 
171 static struct concat_params *g_params;
172 static size_t g_params_count;
173 
174 #define ARRAY_FOR_EACH(a, e) \
175 	for (e = a; e < a + SPDK_COUNTOF(a); e++)
176 
177 #define CONCAT_PARAMS_FOR_EACH(p) \
178 	for (p = g_params; p < g_params + g_params_count; p++)
179 
180 static int
181 test_setup(void)
182 {
183 	uint8_t num_base_bdevs_values[] = { 3, 4, 5 };
184 	uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
185 	uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
186 	uint32_t strip_size_kb_values[] = { 1, 4, 128 };
187 	uint8_t *num_base_bdevs;
188 	uint64_t *base_bdev_blockcnt;
189 	uint32_t *base_bdev_blocklen;
190 	uint32_t *strip_size_kb;
191 	struct concat_params *params;
192 
193 	g_params_count = SPDK_COUNTOF(num_base_bdevs_values) *
194 			 SPDK_COUNTOF(base_bdev_blockcnt_values) *
195 			 SPDK_COUNTOF(base_bdev_blocklen_values) *
196 			 SPDK_COUNTOF(strip_size_kb_values);
197 	g_params = calloc(g_params_count, sizeof(*g_params));
198 	if (!g_params) {
199 		return -ENOMEM;
200 	}
201 
202 	params = g_params;
203 
204 	ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
205 		ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
206 			ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
207 				ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) {
208 					params->num_base_bdevs = *num_base_bdevs;
209 					params->base_bdev_blockcnt = *base_bdev_blockcnt;
210 					params->base_bdev_blocklen = *base_bdev_blocklen;
211 					params->strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen;
212 					if (params->strip_size == 0 ||
213 					    params->strip_size > *base_bdev_blockcnt) {
214 						g_params_count--;
215 						continue;
216 					}
217 					params++;
218 				}
219 			}
220 		}
221 	}
222 
223 	return 0;
224 }
225 
226 static int
227 test_cleanup(void)
228 {
229 	free(g_params);
230 	return 0;
231 }
232 
233 static struct raid_bdev *
234 create_raid_bdev(struct concat_params *params)
235 {
236 	struct raid_bdev *raid_bdev;
237 	struct raid_base_bdev_info *base_info;
238 
239 	raid_bdev = calloc(1, sizeof(*raid_bdev));
240 	SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
241 
242 	raid_bdev->module = &g_concat_module;
243 	raid_bdev->num_base_bdevs = params->num_base_bdevs;
244 	raid_bdev->base_bdev_info = calloc(raid_bdev->num_base_bdevs,
245 					   sizeof(struct raid_base_bdev_info));
246 	SPDK_CU_ASSERT_FATAL(raid_bdev->base_bdev_info != NULL);
247 
248 	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
249 		base_info->bdev = calloc(1, sizeof(*base_info->bdev));
250 		SPDK_CU_ASSERT_FATAL(base_info->bdev != NULL);
251 		base_info->desc = calloc(1, sizeof(*base_info->desc));
252 		SPDK_CU_ASSERT_FATAL(base_info->desc != NULL);
253 
254 		base_info->bdev->blockcnt = params->base_bdev_blockcnt;
255 		base_info->bdev->blocklen = params->base_bdev_blocklen;
256 	}
257 
258 	raid_bdev->strip_size = params->strip_size;
259 	raid_bdev->strip_size_shift = spdk_u32log2(raid_bdev->strip_size);
260 	raid_bdev->bdev.blocklen = params->base_bdev_blocklen;
261 
262 	return raid_bdev;
263 }
264 
265 static void
266 delete_raid_bdev(struct raid_bdev *raid_bdev)
267 {
268 	struct raid_base_bdev_info *base_info;
269 
270 	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
271 		free(base_info->bdev);
272 		free(base_info->desc);
273 	}
274 	free(raid_bdev->base_bdev_info);
275 	free(raid_bdev);
276 }
277 
/*
 * Create a raid_bdev and start the concat module on it; concat_start()
 * populates module_private with the per-base-bdev block ranges.
 */
static struct raid_bdev *
create_concat(struct concat_params *params)
{
	struct raid_bdev *raid_bdev;

	raid_bdev = create_raid_bdev(params);
	CU_ASSERT(concat_start(raid_bdev) == 0);

	return raid_bdev;
}
286 
/* Stop the concat module, then release the raid_bdev it was attached to. */
static void
delete_concat(struct raid_bdev *raid_bdev)
{
	concat_stop(raid_bdev);
	delete_raid_bdev(raid_bdev);
}
293 
294 static void
295 test_concat_start(void)
296 {
297 	struct raid_bdev *raid_bdev;
298 	struct concat_params *params;
299 	struct concat_block_range *block_range;
300 	uint64_t total_blockcnt;
301 	int i;
302 
303 	CONCAT_PARAMS_FOR_EACH(params) {
304 		raid_bdev = create_concat(params);
305 		block_range = raid_bdev->module_private;
306 		total_blockcnt = 0;
307 		for (i = 0; i < params->num_base_bdevs; i++) {
308 			CU_ASSERT(block_range[i].start == total_blockcnt);
309 			CU_ASSERT(block_range[i].length == params->base_bdev_blockcnt);
310 			total_blockcnt += params->base_bdev_blockcnt;
311 		}
312 		delete_concat(raid_bdev);
313 	}
314 }
315 
316 static void
317 bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
318 {
319 	if (bdev_io->u.bdev.iovs) {
320 		if (bdev_io->u.bdev.iovs->iov_base) {
321 			free(bdev_io->u.bdev.iovs->iov_base);
322 		}
323 		free(bdev_io->u.bdev.iovs);
324 	}
325 
326 	if (bdev_io->u.bdev.ext_opts) {
327 		if (bdev_io->u.bdev.ext_opts->metadata) {
328 			bdev_io->u.bdev.ext_opts->metadata = NULL;
329 		}
330 		free(bdev_io->u.bdev.ext_opts);
331 	}
332 	free(bdev_io);
333 }
334 
335 static void
336 bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, struct spdk_bdev *bdev,
337 		   uint64_t lba, uint64_t blocks, int16_t iotype)
338 {
339 	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);
340 
341 	bdev_io->bdev = bdev;
342 	bdev_io->u.bdev.offset_blocks = lba;
343 	bdev_io->u.bdev.num_blocks = blocks;
344 	bdev_io->type = iotype;
345 
346 	if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP || bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) {
347 		return;
348 	}
349 
350 	bdev_io->u.bdev.iovcnt = 1;
351 	bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec));
352 	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL);
353 	bdev_io->u.bdev.iovs->iov_base = calloc(1, bdev_io->u.bdev.num_blocks * 4096);
354 	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs->iov_base != NULL);
355 	bdev_io->u.bdev.iovs->iov_len = bdev_io->u.bdev.num_blocks * BLOCK_LEN;
356 	bdev_io->internal.ch = channel;
357 	bdev_io->u.bdev.ext_opts = calloc(1, sizeof(struct spdk_bdev_ext_io_opts));
358 	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.ext_opts != NULL);
359 	bdev_io->u.bdev.ext_opts->metadata = (void *)0xAEDFEBAC;
360 }
361 
/*
 * Submit one single-block read or write per base bdev and verify that
 * exactly one request with the expected offset, length, io_type and
 * metadata pointer reaches the stubbed base-bdev functions.
 *
 * The lba starts at 0 and advances by base_bdev_blockcnt per iteration, so
 * each request lands on the first block of a different base bdev — hence
 * the recorded base-bdev offset is always 0. init_globals() leaves
 * g_succeed false, so every submission first fails with -ENOMEM and is
 * retried through the raid_bdev_queue_io_wait() stub.
 */
static void
submit_and_verify_rw(enum CONCAT_IO_TYPE io_type, struct concat_params *params)
{
	struct raid_bdev *raid_bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	struct raid_bdev_io *raid_io;
	struct raid_bdev_io_channel *raid_ch;
	uint64_t lba, blocks;
	int i;

	lba = 0;
	blocks = 1;
	for (i = 0; i < params->num_base_bdevs; i++) {
		init_globals();
		raid_bdev = create_concat(params);
		/* The raid_bdev_io lives in the driver_ctx area that follows
		 * the spdk_bdev_io in a single allocation. */
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
		raid_ch = calloc(1, sizeof(struct raid_bdev_io_channel));
		SPDK_CU_ASSERT_FATAL(raid_ch != NULL);
		raid_ch->base_channel = calloc(params->num_base_bdevs,
					       sizeof(struct spdk_io_channel));
		SPDK_CU_ASSERT_FATAL(raid_ch->base_channel != NULL);
		raid_io->raid_ch = raid_ch;
		raid_io->raid_bdev = raid_bdev;
		ch = calloc(1, sizeof(struct spdk_io_channel));
		SPDK_CU_ASSERT_FATAL(ch != NULL);

		switch (io_type) {
		case CONCAT_WRITEV:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_WRITE);
			concat_submit_rw_request(raid_io);
			break;
		case CONCAT_READV:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_READ);
			concat_submit_rw_request(raid_io);
			break;
		case CONCAT_UNMAP:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_UNMAP);
			concat_submit_null_payload_request(raid_io);
			break;
		case CONCAT_FLUSH:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_FLUSH);
			concat_submit_null_payload_request(raid_io);
			break;
		default:
			CU_ASSERT(false);
		}

		/*
		 * We submit request to the first lba of each underlying device,
		 * so the offset of the underling device should always be 0.
		 */
		CU_ASSERT(g_req_records.offset_blocks[0] == 0);
		CU_ASSERT(g_req_records.num_blocks[0] == blocks);
		CU_ASSERT(g_req_records.io_type[0] == io_type);
		CU_ASSERT(g_req_records.count == 1);
		/* The sentinel set in bdev_io_initialize() must pass through. */
		CU_ASSERT(g_req_records.md == (void *)0xAEDFEBAC);
		bdev_io_cleanup(bdev_io);
		free(ch);
		free(raid_ch->base_channel);
		free(raid_ch);
		delete_concat(raid_bdev);
		lba += params->base_bdev_blockcnt;
	}
}
429 
430 static void
431 test_concat_rw(void)
432 {
433 	struct concat_params *params;
434 	enum CONCAT_IO_TYPE io_type_list[] = {CONCAT_WRITEV, CONCAT_READV};
435 	enum CONCAT_IO_TYPE io_type;
436 	int i;
437 
438 	CONCAT_PARAMS_FOR_EACH(params) {
439 		for (i = 0; i < 2; i ++) {
440 			io_type = io_type_list[i];
441 			submit_and_verify_rw(io_type, params);
442 		}
443 	}
444 }
445 
/*
 * Submit one null-payload request (flush or unmap) that starts inside the
 * second base bdev and, when possible, crosses into the third, then verify
 * how concat splits it across the underlying bdevs:
 *
 *  - blockcnt > 1: the request spans two bdevs, so two sub-requests are
 *    expected — the tail of the second bdev and the head of the third.
 *  - blockcnt == 1 and more than 3 bdevs: the request covers the third and
 *    fourth one-block bdevs, again two sub-requests of one block each.
 *  - blockcnt == 1 and exactly 3 bdevs: only one block remains, so a
 *    single one-block sub-request is expected.
 */
static void
submit_and_verify_null_payload(enum CONCAT_IO_TYPE io_type, struct concat_params *params)
{
	struct raid_bdev *raid_bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	struct raid_bdev_io *raid_io;
	struct raid_bdev_io_channel *raid_ch;
	uint64_t lba, blocks;

	/*
	 * In this unittest, all base bdevs have the same blockcnt.
	 * If the base_bdev_blockcnt > 1, the request will start from
	 * the second bdev, and across two bdevs.
	 * If the base_bdev_blockcnt == 1, the request will start from
	 * the third bdev. In this case, if there are only 3 bdevs,
	 * we can not set blocks to base_bdev_blockcnt + 1 because the request
	 * will be beyond the end of the last bdev, so we set the blocks to 1
	 */
	lba = params->base_bdev_blockcnt + 1;
	if (params->base_bdev_blockcnt == 1 && params->num_base_bdevs == 3) {
		blocks = 1;
	} else {
		blocks = params->base_bdev_blockcnt + 1;
	}
	init_globals();
	raid_bdev = create_concat(params);
	/* The raid_bdev_io lives in the driver_ctx area that follows the
	 * spdk_bdev_io in a single allocation. */
	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
	raid_ch = calloc(1, sizeof(struct raid_bdev_io_channel));
	SPDK_CU_ASSERT_FATAL(raid_ch != NULL);
	raid_ch->base_channel = calloc(params->num_base_bdevs,
				       sizeof(struct spdk_io_channel));
	SPDK_CU_ASSERT_FATAL(raid_ch->base_channel != NULL);
	raid_io->raid_ch = raid_ch;
	raid_io->raid_bdev = raid_bdev;
	ch = calloc(1, sizeof(struct spdk_io_channel));
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	switch (io_type) {
	case CONCAT_UNMAP:
		bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_UNMAP);
		concat_submit_null_payload_request(raid_io);
		break;
	case CONCAT_FLUSH:
		bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_FLUSH);
		concat_submit_null_payload_request(raid_io);
		break;
	default:
		CU_ASSERT(false);
	}

	if (params->base_bdev_blockcnt == 1) {
		if (params->num_base_bdevs == 3) {
			/* Single one-block request on the last bdev. */
			CU_ASSERT(g_req_records.count == 1);
			CU_ASSERT(g_req_records.offset_blocks[0] == 0);
			CU_ASSERT(g_req_records.num_blocks[0] == 1);
		} else {
			/* One block each on the third and fourth bdevs. */
			CU_ASSERT(g_req_records.count == 2);
			CU_ASSERT(g_req_records.offset_blocks[0] == 0);
			CU_ASSERT(g_req_records.num_blocks[0] == 1);
			CU_ASSERT(g_req_records.io_type[0] == io_type);
			CU_ASSERT(g_req_records.offset_blocks[1] == 0);
			CU_ASSERT(g_req_records.num_blocks[1] == 1);
			CU_ASSERT(g_req_records.io_type[1] == io_type);
		}
	} else {
		/* Tail of the second bdev plus the first two blocks of the third. */
		CU_ASSERT(g_req_records.count == 2);
		CU_ASSERT(g_req_records.offset_blocks[0] == 1);
		CU_ASSERT(g_req_records.num_blocks[0] == params->base_bdev_blockcnt - 1);
		CU_ASSERT(g_req_records.io_type[0] == io_type);
		CU_ASSERT(g_req_records.offset_blocks[1] == 0);
		CU_ASSERT(g_req_records.num_blocks[1] == 2);
		CU_ASSERT(g_req_records.io_type[1] == io_type);
	}
	bdev_io_cleanup(bdev_io);
	free(ch);
	free(raid_ch->base_channel);
	free(raid_ch);
	delete_concat(raid_bdev);
}
528 
529 static void
530 test_concat_null_payload(void)
531 {
532 	struct concat_params *params;
533 	enum CONCAT_IO_TYPE io_type_list[] = {CONCAT_FLUSH, CONCAT_UNMAP};
534 	enum CONCAT_IO_TYPE io_type;
535 	int i;
536 
537 	CONCAT_PARAMS_FOR_EACH(params) {
538 		for (i = 0; i < 2; i ++) {
539 			io_type = io_type_list[i];
540 			submit_and_verify_null_payload(io_type, params);
541 		}
542 	}
543 }
544 
545 int
546 main(int argc, char **argv)
547 {
548 	CU_pSuite suite = NULL;
549 	unsigned int num_failures;
550 
551 	CU_set_error_action(CUEA_ABORT);
552 	CU_initialize_registry();
553 
554 	suite = CU_add_suite("concat", test_setup, test_cleanup);
555 	CU_ADD_TEST(suite, test_concat_start);
556 	CU_ADD_TEST(suite, test_concat_rw);
557 	CU_ADD_TEST(suite, test_concat_null_payload);
558 
559 	CU_basic_set_mode(CU_BRM_VERBOSE);
560 	CU_basic_run_tests();
561 	num_failures = CU_get_number_of_failures();
562 	CU_cleanup_registry();
563 	return num_failures;
564 }
565