/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2020 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/thread.h"
#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/log.h"
#include "spdk/string.h"
#include "spdk/accel.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/xor.h"
#include "spdk/dif.h"

#define DATA_PATTERN 0x5a
#define ALIGN_4K 0x1000
#define COMP_BUF_PAD_PERCENTAGE 1.1L

static uint64_t	g_tsc_rate;
static uint64_t g_tsc_end;
static int g_rc;
static int g_xfer_size_bytes = 4096;
static int g_block_size_bytes = 512;
static int g_md_size_bytes = 8;
static int g_queue_depth = 32;
/* g_allocate_depth indicates how many tasks we allocate per worker. It will
 * be at least as much as the queue depth.
 */
static int g_allocate_depth = 0;
static int g_threads_per_core = 1;
static int g_time_in_sec = 5;
static uint32_t g_crc32c_seed = 0;
static uint32_t g_chained_count = 1;
static int g_fail_percent_goal = 0;
static uint8_t g_fill_pattern = 255;
static uint32_t g_xor_src_count = 2;
static bool g_verify = false;
static const char *g_workload_type = NULL;
static enum spdk_accel_opcode g_workload_selection = SPDK_ACCEL_OPC_LAST;
static const char *g_module_name = NULL;
static struct worker_thread *g_workers = NULL;
static int g_num_workers = 0;
static char *g_cd_file_in_name = NULL;
static pthread_mutex_t g_workers_lock = PTHREAD_MUTEX_INITIALIZER;
static struct spdk_app_opts g_opts = {};

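/*
 * One segment of the compress/decompress input file: the uncompressed data
 * read from disk, its compressed counterpart (padded for incompressible
 * data), and optional iovec views of each for chained operations.
 */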
struct ap_compress_seg {
	void		*uncompressed_data;
	uint32_t	uncompressed_len;
	struct iovec	*uncompressed_iovs;
	uint32_t	uncompressed_iovcnt;

	void		*compressed_data;
	uint32_t	compressed_len;
	uint32_t	compressed_len_padded;
	struct iovec	*compressed_iovs;
	uint32_t	compressed_iovcnt;

	STAILQ_ENTRY(ap_compress_seg)	link;
};

static STAILQ_HEAD(, ap_compress_seg) g_compress_segs = STAILQ_HEAD_INITIALIZER(g_compress_segs);

struct worker_thread;
static void accel_done(void *ref, int status);

struct display_info {
	int core;
	int thread;
};

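/*
 * State for a single outstanding accel operation. Which buffer fields are
 * used depends on the selected workload (e.g. sources for XOR, src_iovs for
 * chained/CRC/DIF operations, dst2 for dualcast).
 */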
struct ap_task {
	void			*src;
	struct iovec		*src_iovs;
	uint32_t		src_iovcnt;
	void			**sources;
	struct iovec		*dst_iovs;
	uint32_t		dst_iovcnt;
	void			*dst;
	void			*dst2;
	uint32_t		crc_dst;
	uint32_t		compressed_sz;
	struct ap_compress_seg *cur_seg;
	struct worker_thread	*worker;
	int			expected_status; /* used for the compare operation */
	uint32_t		num_blocks; /* used for the DIF related operations */
	struct spdk_dif_ctx	dif_ctx;
	struct spdk_dif_error	dif_err;
	TAILQ_ENTRY(ap_task)	link;
};

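/*
 * Per-SPDK-thread state: the accel channel, the pool of preallocated tasks,
 * accumulated statistics, and the pollers that drive draining and shutdown.
 */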
struct worker_thread {
	struct spdk_io_channel		*ch;
	struct spdk_accel_opcode_stats	stats;
	uint64_t			xfer_failed;
	uint64_t			injected_miscompares;
	uint64_t			current_queue_depth;
	TAILQ_HEAD(, ap_task)		tasks_pool;
	struct worker_thread		*next;
	unsigned			core;
	struct spdk_thread		*thread;
	bool				is_draining;
	struct spdk_poller		*is_draining_poller;
	struct spdk_poller		*stop_poller;
	void				*task_base;
	struct display_info		display;
	enum spdk_accel_opcode		workload;
};

static void
dump_user_config(void)
{
	const char *module_name = NULL;
	int rc;

	rc = spdk_accel_get_opc_module_name(g_workload_selection, &module_name);
	if (rc) {
		printf("error getting module name (%d)\n", rc);
	}

	printf("\nSPDK Configuration:\n");
	printf("Core mask:      %s\n\n", g_opts.reactor_mask);
	printf("Accel Perf Configuration:\n");
	printf("Workload Type:  %s\n", g_workload_type);
	if (g_workload_selection == SPDK_ACCEL_OPC_CRC32C ||
	    g_workload_selection == SPDK_ACCEL_OPC_COPY_CRC32C) {
		printf("CRC-32C seed:   %u\n", g_crc32c_seed);
	} else if (g_workload_selection == SPDK_ACCEL_OPC_FILL) {
		printf("Fill pattern:   0x%x\n", g_fill_pattern);
	} else if ((g_workload_selection == SPDK_ACCEL_OPC_COMPARE) && g_fail_percent_goal > 0) {
		printf("Failure inject: %u percent\n", g_fail_percent_goal);
	} else if (g_workload_selection == SPDK_ACCEL_OPC_XOR) {
		printf("Source buffers: %u\n", g_xor_src_count);
	}
	if (g_workload_selection == SPDK_ACCEL_OPC_COPY_CRC32C ||
	    g_workload_selection == SPDK_ACCEL_OPC_DIF_VERIFY ||
	    g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE ||
	    g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE_COPY) {
		printf("Vector size:    %u bytes\n", g_xfer_size_bytes);
		printf("Transfer size:  %u bytes\n", g_xfer_size_bytes * g_chained_count);
	} else {
		printf("Transfer size:  %u bytes\n", g_xfer_size_bytes);
	}
	if (g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE ||
	    g_workload_selection == SPDK_ACCEL_OPC_DIF_VERIFY) {
		printf("Block size:     %u bytes\n", g_block_size_bytes);
		printf("Metadata size:  %u bytes\n", g_md_size_bytes);
	}
	printf("Vector count:   %u\n", g_chained_count);
	printf("Module:         %s\n", module_name);
	if (g_workload_selection == SPDK_ACCEL_OPC_COMPRESS ||
	    g_workload_selection == SPDK_ACCEL_OPC_DECOMPRESS) {
		printf("File Name:      %s\n", g_cd_file_in_name);
	}
	printf("Queue depth:    %u\n", g_queue_depth);
	printf("Allocate depth: %u\n", g_allocate_depth);
	printf("# threads/core: %u\n", g_threads_per_core);
	printf("Run time:       %u seconds\n", g_time_in_sec);
	printf("Verify:         %s\n\n", g_verify ? "Yes" : "No");
}

static void
usage(void)
{
	printf("accel_perf options:\n");
	printf("\t[-h help message]\n");
	printf("\t[-q queue depth per core]\n");
	printf("\t[-C for supported workloads, use this value to configure the io vector size to test (default 1)]\n");
	printf("\t[-T number of threads per core]\n");
	printf("\t[-o transfer size in bytes (default: 4KiB. For compress/decompress, 0 means the input file size)]\n");
	printf("\t[-t time in seconds]\n");
	printf("\t[-w workload type must be one of these: copy, fill, crc32c, copy_crc32c, compare, compress, decompress, dualcast, xor,\n");
	printf("\t                                        dif_verify, dif_generate, dif_generate_copy]\n");
	printf("\t[-M assign module to the operation, not compatible with accel_assign_opc RPC]\n");
	printf("\t[-l for compress/decompress workloads, name of uncompressed input file]\n");
	printf("\t[-S for crc32c workload, use this seed value (default 0)]\n");
	printf("\t[-P for compare workload, percentage of operations that should miscompare (percent, default 0)]\n");
	printf("\t[-f for fill workload, use this BYTE value (default 255)]\n");
	printf("\t[-x for xor workload, use this number of source buffers (default, minimum: 2)]\n");
	printf("\t[-y verify result if this switch is on]\n");
	printf("\t[-a tasks to allocate per core (default: same value as -q)]\n");
	printf("\t\tCan be used to spread operations across a wider range of memory.\n");
}

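/* Option callback invoked by spdk_app_parse_args() once per recognized flag. */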
static int
parse_args(int ch, char *arg)
{
	int argval = 0;

	switch (ch) {
	case 'a':
	case 'C':
	case 'f':
	case 'T':
	case 'o':
	case 'P':
	case 'q':
	case 'S':
	case 't':
	case 'x':
		argval = spdk_strtol(optarg, 10);
		if (argval < 0) {
			fprintf(stderr, "-%c option must be non-negative.\n", ch);
			usage();
			return 1;
		}
		break;
	default:
		break;
	}

	switch (ch) {
	case 'a':
		g_allocate_depth = argval;
		break;
	case 'C':
		g_chained_count = argval;
		break;
	case 'l':
		g_cd_file_in_name = optarg;
		break;
	case 'f':
		g_fill_pattern = (uint8_t)argval;
		break;
	case 'T':
		g_threads_per_core = argval;
		break;
	case 'o':
		g_xfer_size_bytes = argval;
		break;
	case 'P':
		g_fail_percent_goal = argval;
		break;
	case 'q':
		g_queue_depth = argval;
		break;
	case 'S':
		g_crc32c_seed = argval;
		break;
	case 't':
		g_time_in_sec = argval;
		break;
	case 'x':
		g_xor_src_count = argval;
		break;
	case 'y':
		g_verify = true;
		break;
	case 'w':
		g_workload_type = optarg;
		if (!strcmp(g_workload_type, "copy")) {
			g_workload_selection = SPDK_ACCEL_OPC_COPY;
		} else if (!strcmp(g_workload_type, "fill")) {
			g_workload_selection = SPDK_ACCEL_OPC_FILL;
		} else if (!strcmp(g_workload_type, "crc32c")) {
			g_workload_selection = SPDK_ACCEL_OPC_CRC32C;
		} else if (!strcmp(g_workload_type, "copy_crc32c")) {
			g_workload_selection = SPDK_ACCEL_OPC_COPY_CRC32C;
		} else if (!strcmp(g_workload_type, "compare")) {
			g_workload_selection = SPDK_ACCEL_OPC_COMPARE;
		} else if (!strcmp(g_workload_type, "dualcast")) {
			g_workload_selection = SPDK_ACCEL_OPC_DUALCAST;
		} else if (!strcmp(g_workload_type, "compress")) {
			g_workload_selection = SPDK_ACCEL_OPC_COMPRESS;
		} else if (!strcmp(g_workload_type, "decompress")) {
			g_workload_selection = SPDK_ACCEL_OPC_DECOMPRESS;
		} else if (!strcmp(g_workload_type, "xor")) {
			g_workload_selection = SPDK_ACCEL_OPC_XOR;
		} else if (!strcmp(g_workload_type, "dif_verify")) {
			g_workload_selection = SPDK_ACCEL_OPC_DIF_VERIFY;
		} else if (!strcmp(g_workload_type, "dif_generate")) {
			g_workload_selection = SPDK_ACCEL_OPC_DIF_GENERATE;
		} else if (!strcmp(g_workload_type, "dif_generate_copy")) {
			g_workload_selection = SPDK_ACCEL_OPC_DIF_GENERATE_COPY;
		} else {
			fprintf(stderr, "Unsupported workload type: %s\n", optarg);
			usage();
			return 1;
		}
		break;
	case 'M':
		g_module_name = optarg;
		break;

	default:
		usage();
		return 1;
	}

	return 0;
}

static int dump_result(void);
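/* Tear down one worker: capture its opcode stats, release the channel, and
 * stop the app once the last worker has exited. */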
static void
unregister_worker(void *arg1)
{
	struct worker_thread *worker = arg1;

	if (worker->ch) {
		spdk_accel_get_opcode_stats(worker->ch, worker->workload,
					    &worker->stats, sizeof(worker->stats));
		spdk_put_io_channel(worker->ch);
		worker->ch = NULL;
	}
	free(worker->task_base);
	spdk_thread_exit(spdk_get_thread());
	pthread_mutex_lock(&g_workers_lock);
	assert(g_num_workers >= 1);
	if (--g_num_workers == 0) {
		pthread_mutex_unlock(&g_workers_lock);
		/* Only dump results on successful runs */
		if (g_rc == 0) {
			g_rc = dump_result();
		}
		spdk_app_stop(g_rc);
	} else {
		pthread_mutex_unlock(&g_workers_lock);
	}
}

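/* Split a buffer of sz bytes into iovcnt iovec elements of roughly equal size. */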
static void
accel_perf_construct_iovs(void *buf, uint64_t sz, struct iovec *iovs, uint32_t iovcnt)
{
	uint64_t ele_size;
	uint8_t *data;
	uint32_t i;

	ele_size = spdk_divide_round_up(sz, iovcnt);

	data = buf;
	for (i = 0; i < iovcnt; i++) {
		ele_size = spdk_min(ele_size, sz);
		assert(ele_size > 0);

		iovs[i].iov_base = data;
		iovs[i].iov_len = ele_size;

		data += ele_size;
		sz -= ele_size;
	}
	assert(sz == 0);
}

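/* Allocate and initialize the source/destination buffers (and, for DIF
 * workloads, the DIF context) that one task needs for the selected workload. */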
static int
_get_task_data_bufs(struct ap_task *task)
{
	uint32_t align = 0;
	uint32_t i = 0;
	int src_buff_len = g_xfer_size_bytes;
	int dst_buff_len = g_xfer_size_bytes;
	struct spdk_dif_ctx_init_ext_opts dif_opts;
	uint32_t num_blocks, transfer_size_with_md;
	int rc;

	/* For dualcast, the DSA HW requires 4K alignment on destination addresses but
	 * we do this for all modules to keep it simple.
	 */
	if (g_workload_selection == SPDK_ACCEL_OPC_DUALCAST) {
		align = ALIGN_4K;
	}

	if (g_workload_selection == SPDK_ACCEL_OPC_COMPRESS ||
	    g_workload_selection == SPDK_ACCEL_OPC_DECOMPRESS) {
		task->cur_seg = STAILQ_FIRST(&g_compress_segs);

		if (g_workload_selection == SPDK_ACCEL_OPC_COMPRESS) {
			dst_buff_len = task->cur_seg->compressed_len_padded;
		}

		task->dst = spdk_dma_zmalloc(dst_buff_len, align, NULL);
		if (task->dst == NULL) {
			fprintf(stderr, "Unable to alloc dst buffer\n");
			return -ENOMEM;
		}

		task->dst_iovs = calloc(g_chained_count, sizeof(struct iovec));
		if (!task->dst_iovs) {
			fprintf(stderr, "cannot allocate task->dst_iovs for task=%p\n", task);
			return -ENOMEM;
		}
		task->dst_iovcnt = g_chained_count;
		accel_perf_construct_iovs(task->dst, dst_buff_len, task->dst_iovs, task->dst_iovcnt);

		return 0;
	}

	if (g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE_COPY) {
		task->dst_iovcnt = g_chained_count;
		task->dst_iovs = calloc(task->dst_iovcnt, sizeof(struct iovec));
		if (!task->dst_iovs) {
			fprintf(stderr, "cannot allocate task->dst_iovs for task=%p\n", task);
			return -ENOMEM;
		}

		num_blocks = g_xfer_size_bytes / g_block_size_bytes;
		/* Add bytes for each block for metadata */
		transfer_size_with_md = g_xfer_size_bytes + (num_blocks * g_md_size_bytes);
		task->num_blocks = num_blocks;

		for (i = 0; i < task->dst_iovcnt; i++) {
			task->dst_iovs[i].iov_base = spdk_dma_zmalloc(transfer_size_with_md, 0, NULL);
			if (task->dst_iovs[i].iov_base == NULL) {
				return -ENOMEM;
			}
			task->dst_iovs[i].iov_len = transfer_size_with_md;
		}

		dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
		dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;

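		/* The block size includes the interleaved metadata; 0x123, 0xFFFF and
		 * 0x234 are the initial reference tag, app tag mask and app tag. */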
		rc = spdk_dif_ctx_init(&task->dif_ctx,
				       g_block_size_bytes + g_md_size_bytes,
				       g_md_size_bytes, true, true,
				       SPDK_DIF_TYPE1,
				       SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
				       0x123, 0xFFFF, 0x234, 0, 0, &dif_opts);
		if (rc != 0) {
			fprintf(stderr, "Initialization of DIF context failed\n");
			return rc;
		}
	}

	if (g_workload_selection == SPDK_ACCEL_OPC_CRC32C ||
	    g_workload_selection == SPDK_ACCEL_OPC_COPY_CRC32C ||
	    g_workload_selection == SPDK_ACCEL_OPC_DIF_VERIFY ||
	    g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE ||
	    g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE_COPY) {
		assert(g_chained_count > 0);
		task->src_iovcnt = g_chained_count;
		task->src_iovs = calloc(task->src_iovcnt, sizeof(struct iovec));
		if (!task->src_iovs) {
			fprintf(stderr, "cannot allocate task->src_iovs for task=%p\n", task);
			return -ENOMEM;
		}

		if (g_workload_selection == SPDK_ACCEL_OPC_COPY_CRC32C) {
			dst_buff_len = g_xfer_size_bytes * g_chained_count;
		}

		if (g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE ||
		    g_workload_selection == SPDK_ACCEL_OPC_DIF_VERIFY) {
			src_buff_len += (g_xfer_size_bytes / g_block_size_bytes) * g_md_size_bytes;
		}

		for (i = 0; i < task->src_iovcnt; i++) {
			task->src_iovs[i].iov_base = spdk_dma_zmalloc(src_buff_len, 0, NULL);
			if (task->src_iovs[i].iov_base == NULL) {
				return -ENOMEM;
			}
			memset(task->src_iovs[i].iov_base, DATA_PATTERN, src_buff_len);
			task->src_iovs[i].iov_len = src_buff_len;
		}
	} else if (g_workload_selection == SPDK_ACCEL_OPC_XOR) {
		assert(g_xor_src_count > 1);
		task->sources = calloc(g_xor_src_count, sizeof(*task->sources));
		if (!task->sources) {
			return -ENOMEM;
		}

		for (i = 0; i < g_xor_src_count; i++) {
			task->sources[i] = spdk_dma_zmalloc(g_xfer_size_bytes, 0, NULL);
			if (!task->sources[i]) {
				return -ENOMEM;
			}
			memset(task->sources[i], DATA_PATTERN, g_xfer_size_bytes);
		}
	} else {
		task->src = spdk_dma_zmalloc(g_xfer_size_bytes, 0, NULL);
		if (task->src == NULL) {
			fprintf(stderr, "Unable to alloc src buffer\n");
			return -ENOMEM;
		}

		/* For fill, set the entire src buffer so it can be compared against dst when verify is enabled. */
		if (g_workload_selection == SPDK_ACCEL_OPC_FILL) {
			memset(task->src, g_fill_pattern, g_xfer_size_bytes);
		} else {
			memset(task->src, DATA_PATTERN, g_xfer_size_bytes);
		}
	}

	if (g_workload_selection != SPDK_ACCEL_OPC_CRC32C &&
	    g_workload_selection != SPDK_ACCEL_OPC_DIF_VERIFY &&
	    g_workload_selection != SPDK_ACCEL_OPC_DIF_GENERATE &&
	    g_workload_selection != SPDK_ACCEL_OPC_DIF_GENERATE_COPY) {
		task->dst = spdk_dma_zmalloc(dst_buff_len, align, NULL);
		if (task->dst == NULL) {
			fprintf(stderr, "Unable to alloc dst buffer\n");
			return -ENOMEM;
		}

		/* For compare we want the buffers to match, otherwise not. */
		if (g_workload_selection == SPDK_ACCEL_OPC_COMPARE) {
			memset(task->dst, DATA_PATTERN, dst_buff_len);
		} else {
			memset(task->dst, ~DATA_PATTERN, dst_buff_len);
		}
	}

	/* Dualcast needs a second destination buffer; XOR needs one too when verifying. */
	if (g_workload_selection == SPDK_ACCEL_OPC_DUALCAST ||
	    (g_workload_selection == SPDK_ACCEL_OPC_XOR && g_verify)) {
		task->dst2 = spdk_dma_zmalloc(g_xfer_size_bytes, align, NULL);
		if (task->dst2 == NULL) {
			fprintf(stderr, "Unable to alloc dst buffer\n");
			return -ENOMEM;
		}
		memset(task->dst2, ~DATA_PATTERN, g_xfer_size_bytes);
	}

	if (g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE ||
	    g_workload_selection == SPDK_ACCEL_OPC_DIF_VERIFY) {
		dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
		dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;

		task->num_blocks = (g_xfer_size_bytes * g_chained_count) / g_block_size_bytes;

		rc = spdk_dif_ctx_init(&task->dif_ctx,
				       g_block_size_bytes + g_md_size_bytes,
				       g_md_size_bytes, true, true,
				       SPDK_DIF_TYPE1,
				       SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
				       16, 0xFFFF, 10, 0, 0, &dif_opts);
		if (rc != 0) {
			fprintf(stderr, "Initialization of DIF context failed, error (%d)\n", rc);
			return rc;
		}

		if (g_workload_selection == SPDK_ACCEL_OPC_DIF_VERIFY) {
			rc = spdk_dif_generate(task->src_iovs, task->src_iovcnt, task->num_blocks, &task->dif_ctx);
			if (rc != 0) {
				fprintf(stderr, "Generation of DIF failed, error (%d)\n", rc);
				return rc;
			}
		}
	}

	return 0;
}

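/* Pop a task from the worker's pool, which was sized by g_allocate_depth. */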
inline static struct ap_task *
_get_task(struct worker_thread *worker)
{
	struct ap_task *task;

	if (!TAILQ_EMPTY(&worker->tasks_pool)) {
		task = TAILQ_FIRST(&worker->tasks_pool);
		TAILQ_REMOVE(&worker->tasks_pool, task, link);
	} else {
		fprintf(stderr, "Unable to get ap_task\n");
		return NULL;
	}

	return task;
}

/* Submit one operation using the same ap task that just completed. */
static void
_submit_single(struct worker_thread *worker, struct ap_task *task)
{
	int random_num;
	int rc = 0;
	int flags = 0;

	assert(worker);

	switch (worker->workload) {
	case SPDK_ACCEL_OPC_COPY:
		rc = spdk_accel_submit_copy(worker->ch, task->dst, task->src,
					    g_xfer_size_bytes, flags, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_FILL:
		/* For fill, the first byte of the src buffer supplies the fill value */
		rc = spdk_accel_submit_fill(worker->ch, task->dst, *(uint8_t *)task->src,
					    g_xfer_size_bytes, flags, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_CRC32C:
		rc = spdk_accel_submit_crc32cv(worker->ch, &task->crc_dst,
					       task->src_iovs, task->src_iovcnt, g_crc32c_seed,
					       accel_done, task);
		break;
	case SPDK_ACCEL_OPC_COPY_CRC32C:
		rc = spdk_accel_submit_copy_crc32cv(worker->ch, task->dst, task->src_iovs, task->src_iovcnt,
						    &task->crc_dst, g_crc32c_seed, flags, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_COMPARE:
		random_num = rand() % 100;
		if (random_num < g_fail_percent_goal) {
			task->expected_status = -EILSEQ;
			*(uint8_t *)task->dst = ~DATA_PATTERN;
		} else {
			task->expected_status = 0;
			*(uint8_t *)task->dst = DATA_PATTERN;
		}
		rc = spdk_accel_submit_compare(worker->ch, task->dst, task->src,
					       g_xfer_size_bytes, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_DUALCAST:
		rc = spdk_accel_submit_dualcast(worker->ch, task->dst, task->dst2,
						task->src, g_xfer_size_bytes, flags, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_COMPRESS:
		task->src_iovs = task->cur_seg->uncompressed_iovs;
		task->src_iovcnt = task->cur_seg->uncompressed_iovcnt;
		rc = spdk_accel_submit_compress(worker->ch, task->dst, task->cur_seg->compressed_len_padded,
						task->src_iovs,
						task->src_iovcnt, &task->compressed_sz, flags, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_DECOMPRESS:
		task->src_iovs = task->cur_seg->compressed_iovs;
		task->src_iovcnt = task->cur_seg->compressed_iovcnt;
		rc = spdk_accel_submit_decompress(worker->ch, task->dst_iovs, task->dst_iovcnt, task->src_iovs,
						  task->src_iovcnt, NULL, flags, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_XOR:
		rc = spdk_accel_submit_xor(worker->ch, task->dst, task->sources, g_xor_src_count,
					   g_xfer_size_bytes, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_DIF_VERIFY:
		rc = spdk_accel_submit_dif_verify(worker->ch, task->src_iovs, task->src_iovcnt, task->num_blocks,
						  &task->dif_ctx, &task->dif_err, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_DIF_GENERATE:
		rc = spdk_accel_submit_dif_generate(worker->ch, task->src_iovs, task->src_iovcnt, task->num_blocks,
						    &task->dif_ctx, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
		rc = spdk_accel_submit_dif_generate_copy(worker->ch, task->dst_iovs, task->dst_iovcnt,
				task->src_iovs, task->src_iovcnt,
				task->num_blocks, &task->dif_ctx, accel_done, task);
		break;
	default:
		assert(false);
		break;

	}

	worker->current_queue_depth++;
	if (rc) {
		accel_done(task, rc);
	}
}

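/* Release all buffers owned by a task; which fields were allocated depends on the workload. */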
static void
_free_task_buffers(struct ap_task *task)
{
	uint32_t i;

	if (g_workload_selection == SPDK_ACCEL_OPC_DECOMPRESS ||
	    g_workload_selection == SPDK_ACCEL_OPC_COMPRESS) {
		free(task->dst_iovs);
	} else if (g_workload_selection == SPDK_ACCEL_OPC_CRC32C ||
		   g_workload_selection == SPDK_ACCEL_OPC_COPY_CRC32C ||
		   g_workload_selection == SPDK_ACCEL_OPC_DIF_VERIFY ||
		   g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE ||
		   g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE_COPY) {
		if (task->src_iovs) {
			for (i = 0; i < task->src_iovcnt; i++) {
				if (task->src_iovs[i].iov_base) {
					spdk_dma_free(task->src_iovs[i].iov_base);
				}
			}
			free(task->src_iovs);
		}
	} else if (g_workload_selection == SPDK_ACCEL_OPC_XOR) {
		if (task->sources) {
			for (i = 0; i < g_xor_src_count; i++) {
				spdk_dma_free(task->sources[i]);
			}
			free(task->sources);
		}
	} else {
		spdk_dma_free(task->src);
	}

	spdk_dma_free(task->dst);
	if (g_workload_selection == SPDK_ACCEL_OPC_DUALCAST || g_workload_selection == SPDK_ACCEL_OPC_XOR) {
		spdk_dma_free(task->dst2);
	}

	if (g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE_COPY) {
		if (task->dst_iovs) {
			for (i = 0; i < task->dst_iovcnt; i++) {
				if (task->dst_iovs[i].iov_base) {
					spdk_dma_free(task->dst_iovs[i].iov_base);
				}
			}
			free(task->dst_iovs);
		}
	}
}

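/* Compare a contiguous dst buffer against the concatenated contents of the src iovecs. */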
static int
_vector_memcmp(void *_dst, struct iovec *src_src_iovs, uint32_t iovcnt)
{
	uint32_t i;
	uint32_t ttl_len = 0;
	uint8_t *dst = (uint8_t *)_dst;

	for (i = 0; i < iovcnt; i++) {
		if (memcmp(dst, src_src_iovs[i].iov_base, src_src_iovs[i].iov_len)) {
			return -1;
		}
		dst += src_src_iovs[i].iov_len;
		ttl_len += src_src_iovs[i].iov_len;
	}

	if (ttl_len != iovcnt * g_xfer_size_bytes) {
		return -1;
	}

	return 0;
}

static int _worker_stop(void *arg);

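/* Completion callback: optionally verify the result in software, account for
 * failures and injected miscompares, then return the task to the pool and,
 * unless draining or errored, immediately resubmit. */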
static void
accel_done(void *arg1, int status)
{
	struct ap_task *task = arg1;
	struct worker_thread *worker = task->worker;
	uint32_t sw_crc32c;
	struct spdk_dif_error err_blk;

	assert(worker);
	assert(worker->current_queue_depth > 0);

	if (g_verify && status == 0) {
		switch (worker->workload) {
		case SPDK_ACCEL_OPC_COPY_CRC32C:
			sw_crc32c = spdk_crc32c_iov_update(task->src_iovs, task->src_iovcnt, ~g_crc32c_seed);
			if (task->crc_dst != sw_crc32c) {
				SPDK_NOTICELOG("CRC-32C miscompare\n");
				worker->xfer_failed++;
			}
			if (_vector_memcmp(task->dst, task->src_iovs, task->src_iovcnt)) {
				SPDK_NOTICELOG("Data miscompare\n");
				worker->xfer_failed++;
			}
			break;
		case SPDK_ACCEL_OPC_CRC32C:
			sw_crc32c = spdk_crc32c_iov_update(task->src_iovs, task->src_iovcnt, ~g_crc32c_seed);
			if (task->crc_dst != sw_crc32c) {
				SPDK_NOTICELOG("CRC-32C miscompare\n");
				worker->xfer_failed++;
			}
			break;
		case SPDK_ACCEL_OPC_COPY:
			if (memcmp(task->src, task->dst, g_xfer_size_bytes)) {
				SPDK_NOTICELOG("Data miscompare\n");
				worker->xfer_failed++;
			}
			break;
		case SPDK_ACCEL_OPC_DUALCAST:
			if (memcmp(task->src, task->dst, g_xfer_size_bytes)) {
				SPDK_NOTICELOG("Data miscompare, first destination\n");
				worker->xfer_failed++;
			}
			if (memcmp(task->src, task->dst2, g_xfer_size_bytes)) {
				SPDK_NOTICELOG("Data miscompare, second destination\n");
				worker->xfer_failed++;
			}
			break;
		case SPDK_ACCEL_OPC_FILL:
			if (memcmp(task->dst, task->src, g_xfer_size_bytes)) {
				SPDK_NOTICELOG("Data miscompare\n");
				worker->xfer_failed++;
			}
			break;
		case SPDK_ACCEL_OPC_COMPARE:
			break;
		case SPDK_ACCEL_OPC_COMPRESS:
			break;
		case SPDK_ACCEL_OPC_DECOMPRESS:
			if (memcmp(task->dst, task->cur_seg->uncompressed_data, task->cur_seg->uncompressed_len)) {
				SPDK_NOTICELOG("Data miscompare on decompression\n");
				worker->xfer_failed++;
			}
			break;
		case SPDK_ACCEL_OPC_XOR:
			if (spdk_xor_gen(task->dst2, task->sources, g_xor_src_count,
					 g_xfer_size_bytes) != 0) {
				SPDK_ERRLOG("Failed to generate xor for verification\n");
			} else if (memcmp(task->dst, task->dst2, g_xfer_size_bytes)) {
				SPDK_NOTICELOG("Data miscompare\n");
				worker->xfer_failed++;
			}
			break;
		case SPDK_ACCEL_OPC_DIF_VERIFY:
			break;
		case SPDK_ACCEL_OPC_DIF_GENERATE:
			if (spdk_dif_verify(task->src_iovs, task->src_iovcnt, task->num_blocks,
					    &task->dif_ctx, &err_blk) != 0) {
				SPDK_NOTICELOG("Data miscompare, "
					       "err_type %u, expected %lu, actual %lu, err_offset %u\n",
					       err_blk.err_type, err_blk.expected,
					       err_blk.actual, err_blk.err_offset);
				worker->xfer_failed++;
			}
			break;
		case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
			if (spdk_dif_verify(task->dst_iovs, task->dst_iovcnt, task->num_blocks,
					    &task->dif_ctx, &err_blk) != 0) {
				SPDK_NOTICELOG("Data miscompare, "
					       "err_type %u, expected %lu, actual %lu, err_offset %u\n",
					       err_blk.err_type, err_blk.expected,
					       err_blk.actual, err_blk.err_offset);
				worker->xfer_failed++;
			}
			break;
		default:
			assert(false);
			break;
		}
	}

	if (worker->workload == SPDK_ACCEL_OPC_COMPRESS ||
	    worker->workload == SPDK_ACCEL_OPC_DECOMPRESS) {
		/* Advance the task to the next segment */
		task->cur_seg = STAILQ_NEXT(task->cur_seg, link);
		if (task->cur_seg == NULL) {
			task->cur_seg = STAILQ_FIRST(&g_compress_segs);
		}
	}

	if (task->expected_status == -EILSEQ) {
		assert(status != 0);
		worker->injected_miscompares++;
		status = 0;
	} else if (status) {
		/* Expected to pass but the accel module reported an error (ex: COMPARE operation). */
		worker->xfer_failed++;
	}

	worker->current_queue_depth--;

	if (!worker->is_draining && status == 0) {
		TAILQ_INSERT_TAIL(&worker->tasks_pool, task, link);
		task = _get_task(worker);
		_submit_single(worker, task);
	} else {
		TAILQ_INSERT_TAIL(&worker->tasks_pool, task, link);
	}
}

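/* Print per-worker and aggregate throughput; returns non-zero if any transfer failed. */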
static int
dump_result(void)
{
	uint64_t total_completed = 0;
	uint64_t total_failed = 0;
	uint64_t total_miscompared = 0;
	uint64_t total_xfer_per_sec, total_bw_in_MiBps;
	struct worker_thread *worker = g_workers;
	char tmp[64];

	printf("\n%-12s %20s %16s %16s %16s\n",
	       "Core,Thread", "Transfers", "Bandwidth", "Failed", "Miscompares");
	printf("------------------------------------------------------------------------------------\n");
	while (worker != NULL) {

		uint64_t xfer_per_sec = worker->stats.executed / g_time_in_sec;
		uint64_t bw_in_MiBps = worker->stats.num_bytes /
				       (g_time_in_sec * 1024 * 1024);

		total_completed += worker->stats.executed;
		total_failed += worker->xfer_failed;
		total_miscompared += worker->injected_miscompares;

		snprintf(tmp, sizeof(tmp), "%u,%u", worker->display.core, worker->display.thread);
		if (xfer_per_sec) {
			printf("%-12s %18" PRIu64 "/s %10" PRIu64 " MiB/s %16" PRIu64 " %16" PRIu64 "\n",
			       tmp, xfer_per_sec, bw_in_MiBps, worker->xfer_failed,
			       worker->injected_miscompares);
		}

		worker = worker->next;
	}

	total_xfer_per_sec = total_completed / g_time_in_sec;
	total_bw_in_MiBps = (total_completed * g_xfer_size_bytes) /
			    (g_time_in_sec * 1024 * 1024);

	printf("====================================================================================\n");
	printf("%-12s %18" PRIu64 "/s %10" PRIu64 " MiB/s %16" PRIu64 " %16" PRIu64 "\n",
	       "Total", total_xfer_per_sec, total_bw_in_MiBps, total_failed, total_miscompared);

	return total_failed ? 1 : 0;
}

static inline void
_free_task_buffers_in_pool(struct worker_thread *worker)
{
	struct ap_task *task;

	assert(worker);
	while ((task = TAILQ_FIRST(&worker->tasks_pool))) {
		TAILQ_REMOVE(&worker->tasks_pool, task, link);
		_free_task_buffers(task);
	}
}

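/* Poller that waits until all in-flight operations have completed, then tears the worker down. */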
static int
_check_draining(void *arg)
{
	struct worker_thread *worker = arg;

	assert(worker);

	if (worker->current_queue_depth == 0) {
		_free_task_buffers_in_pool(worker);
		spdk_poller_unregister(&worker->is_draining_poller);
		unregister_worker(worker);
	}

	return SPDK_POLLER_BUSY;
}

static int
_worker_stop(void *arg)
{
	struct worker_thread *worker = arg;

	assert(worker);

	spdk_poller_unregister(&worker->stop_poller);

	/* now let the worker drain and check its outstanding I/O with a poller */
	worker->is_draining = true;
	worker->is_draining_poller = SPDK_POLLER_REGISTER(_check_draining, worker, 0);

	return SPDK_POLLER_BUSY;
}

static void shutdown_cb(void);

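/* Per-SPDK-thread setup: register the worker, allocate its task pool, start
 * the stop timer, and prime the channel with g_queue_depth operations. */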
static void
_init_thread(void *arg1)
{
	struct worker_thread *worker;
	struct ap_task *task;
	int i, num_tasks = g_allocate_depth;
	struct display_info *display = arg1;

	worker = calloc(1, sizeof(*worker));
	if (worker == NULL) {
		fprintf(stderr, "Unable to allocate worker\n");
		free(display);
		spdk_thread_exit(spdk_get_thread());
		goto no_worker;
	}

	worker->workload = g_workload_selection;
	worker->display.core = display->core;
	worker->display.thread = display->thread;
	free(display);
	worker->core = spdk_env_get_current_core();
	worker->thread = spdk_get_thread();
	pthread_mutex_lock(&g_workers_lock);
	g_num_workers++;
	worker->next = g_workers;
	g_workers = worker;
	pthread_mutex_unlock(&g_workers_lock);
	worker->ch = spdk_accel_get_io_channel();
	if (worker->ch == NULL) {
		fprintf(stderr, "Unable to get an accel channel\n");
		goto error;
	}

	TAILQ_INIT(&worker->tasks_pool);

	worker->task_base = calloc(num_tasks, sizeof(struct ap_task));
	if (worker->task_base == NULL) {
		fprintf(stderr, "Could not allocate task base.\n");
		goto error;
	}

	task = worker->task_base;
	for (i = 0; i < num_tasks; i++) {
		TAILQ_INSERT_TAIL(&worker->tasks_pool, task, link);
		task->worker = worker;
		if (_get_task_data_bufs(task)) {
			fprintf(stderr, "Unable to get data bufs\n");
			goto error;
		}
		task++;
	}

	/* Register a poller that will stop the worker at time elapsed */
	worker->stop_poller = SPDK_POLLER_REGISTER(_worker_stop, worker,
			      g_time_in_sec * 1000000ULL);

	/* Load up queue depth worth of operations. */
	for (i = 0; i < g_queue_depth; i++) {
		task = _get_task(worker);
		if (task == NULL) {
			goto error;
		}

		_submit_single(worker, task);
	}
	return;
error:
	_free_task_buffers_in_pool(worker);
	free(worker->task_base);
no_worker:
	shutdown_cb();
	g_rc = -1;
}

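/* Called once the accel framework is ready: spawn one SPDK thread per core
 * per -T and kick off _init_thread() on each. */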
static void
accel_perf_start(void *arg1)
{
	struct spdk_cpuset tmp_cpumask = {};
	char thread_name[32];
	uint32_t i;
	int j;
	struct spdk_thread *thread;
	struct display_info *display;

	g_tsc_rate = spdk_get_ticks_hz();
	g_tsc_end = spdk_get_ticks() + g_time_in_sec * g_tsc_rate;

	dump_user_config();

	printf("Running for %d seconds...\n", g_time_in_sec);
	fflush(stdout);

	/* Create worker threads for each core that was specified. */
	SPDK_ENV_FOREACH_CORE(i) {
		for (j = 0; j < g_threads_per_core; j++) {
			snprintf(thread_name, sizeof(thread_name), "ap_worker_%u_%u", i, j);
			spdk_cpuset_zero(&tmp_cpumask);
			spdk_cpuset_set_cpu(&tmp_cpumask, i, true);
			thread = spdk_thread_create(thread_name, &tmp_cpumask);
			display = calloc(1, sizeof(*display));
			if (display == NULL) {
				fprintf(stderr, "Unable to allocate memory\n");
				spdk_app_stop(-1);
				return;
			}
			display->core = i;
			display->thread = j;
			spdk_thread_send_msg(thread, _init_thread, display);
		}
	}
}

static void
accel_perf_free_compress_segs(void)
{
	struct ap_compress_seg *seg, *tmp;

	STAILQ_FOREACH_SAFE(seg, &g_compress_segs, link, tmp) {
		free(seg->uncompressed_iovs);
		free(seg->compressed_iovs);
		spdk_dma_free(seg->compressed_data);
		spdk_dma_free(seg->uncompressed_data);
		STAILQ_REMOVE_HEAD(&g_compress_segs, link);
		free(seg);
	}
}

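/* Context used while reading the input file and pre-compressing its segments
 * before the measurement starts. */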
struct accel_perf_prep_ctx {
	FILE			*file;
	long			remaining;
	struct spdk_io_channel	*ch;
	struct ap_compress_seg	*cur_seg;
};

static void accel_perf_prep_process_seg(struct accel_perf_prep_ctx *ctx);

static void
accel_perf_prep_process_seg_cpl(void *ref, int status)
{
	struct accel_perf_prep_ctx *ctx = ref;
	struct ap_compress_seg *seg;

	if (status != 0) {
		fprintf(stderr, "error (%d) on initial compress completion\n", status);
		spdk_dma_free(ctx->cur_seg->compressed_data);
		spdk_dma_free(ctx->cur_seg->uncompressed_data);
		free(ctx->cur_seg);
		spdk_put_io_channel(ctx->ch);
		fclose(ctx->file);
		free(ctx);
		spdk_app_stop(-status);
		return;
	}

	seg = ctx->cur_seg;

	if (g_workload_selection == SPDK_ACCEL_OPC_DECOMPRESS) {
		seg->compressed_iovs = calloc(g_chained_count, sizeof(struct iovec));
		if (seg->compressed_iovs == NULL) {
			fprintf(stderr, "unable to allocate iovec\n");
			spdk_dma_free(seg->compressed_data);
			spdk_dma_free(seg->uncompressed_data);
			free(seg);
			spdk_put_io_channel(ctx->ch);
			fclose(ctx->file);
			free(ctx);
			spdk_app_stop(-ENOMEM);
			return;
		}
		seg->compressed_iovcnt = g_chained_count;

		accel_perf_construct_iovs(seg->compressed_data, seg->compressed_len, seg->compressed_iovs,
					  seg->compressed_iovcnt);
	}

	STAILQ_INSERT_TAIL(&g_compress_segs, seg, link);
	ctx->remaining -= seg->uncompressed_len;

	accel_perf_prep_process_seg(ctx);
}

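/* Read the next chunk of the input file into a new segment and compress it
 * once up front so the compressed version is available (e.g. as decompress
 * input); when the whole file has been consumed, start the test. */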
static void
accel_perf_prep_process_seg(struct accel_perf_prep_ctx *ctx)
{
	struct ap_compress_seg *seg;
	int sz, sz_read, sz_padded;
	void *ubuf, *cbuf;
	struct iovec iov[1];
	int rc;

	if (ctx->remaining == 0) {
		spdk_put_io_channel(ctx->ch);
		fclose(ctx->file);
		free(ctx);
		accel_perf_start(NULL);
		return;
	}

	sz = spdk_min(ctx->remaining, g_xfer_size_bytes);
	/* Add 10% padding to the compress buffer for incompressible data. Note that a real
	 * app would likely either handle a too-small destination buffer by resubmitting
	 * the operation with a larger one, or, like the vbdev module does, accept the
	 * error and store the data uncompressed, marking it as such in its own metadata
	 * so that it doesn't later try to decompress uncompressed data.
	 */
	sz_padded = sz * COMP_BUF_PAD_PERCENTAGE;

	ubuf = spdk_dma_zmalloc(sz, ALIGN_4K, NULL);
	if (!ubuf) {
		fprintf(stderr, "unable to allocate uncompress buffer\n");
		rc = -ENOMEM;
		goto error;
	}

	cbuf = spdk_dma_malloc(sz_padded, ALIGN_4K, NULL);
	if (!cbuf) {
		fprintf(stderr, "unable to allocate compress buffer\n");
		rc = -ENOMEM;
		spdk_dma_free(ubuf);
		goto error;
	}

	seg = calloc(1, sizeof(*seg));
	if (!seg) {
		fprintf(stderr, "unable to allocate comp/decomp segment\n");
		spdk_dma_free(ubuf);
		spdk_dma_free(cbuf);
		rc = -ENOMEM;
		goto error;
	}

	sz_read = fread(ubuf, sizeof(uint8_t), sz, ctx->file);
	if (sz_read != sz) {
		fprintf(stderr, "unable to read input file\n");
		free(seg);
		spdk_dma_free(ubuf);
		spdk_dma_free(cbuf);
		rc = -errno;
		goto error;
	}

	if (g_workload_selection == SPDK_ACCEL_OPC_COMPRESS) {
		seg->uncompressed_iovs = calloc(g_chained_count, sizeof(struct iovec));
		if (seg->uncompressed_iovs == NULL) {
			fprintf(stderr, "unable to allocate iovec\n");
			free(seg);
			spdk_dma_free(ubuf);
			spdk_dma_free(cbuf);
			rc = -ENOMEM;
			goto error;
		}
		seg->uncompressed_iovcnt = g_chained_count;
		accel_perf_construct_iovs(ubuf, sz, seg->uncompressed_iovs, seg->uncompressed_iovcnt);
	}

	seg->uncompressed_data = ubuf;
	seg->uncompressed_len = sz;
	seg->compressed_data = cbuf;
	seg->compressed_len = sz;
	seg->compressed_len_padded = sz_padded;

	ctx->cur_seg = seg;
	iov[0].iov_base = seg->uncompressed_data;
	iov[0].iov_len = seg->uncompressed_len;
	/* Note that anytime a call is made to spdk_accel_submit_compress() there's a chance
	 * it will fail with -ENOMEM in the event that the destination buffer is not large enough
	 * to hold the compressed data.  This example app simply adds 10% buffer for compressed data
	 * but real applications may want to consider a more sophisticated method.
	 */
	rc = spdk_accel_submit_compress(ctx->ch, seg->compressed_data, seg->compressed_len_padded, iov, 1,
					&seg->compressed_len, 0, accel_perf_prep_process_seg_cpl, ctx);
	if (rc < 0) {
		fprintf(stderr, "error (%d) on initial compress submission\n", rc);
		goto error;
	}

	return;

error:
	spdk_put_io_channel(ctx->ch);
	fclose(ctx->file);
	free(ctx);
	spdk_app_stop(rc);
}

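/* First callback after spdk_app_start(): validate the -M module assignment
 * and, for compress/decompress, read the input file before running. */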
static void
accel_perf_prep(void *arg1)
{
	struct accel_perf_prep_ctx *ctx;
	const char *module_name = NULL;
	int rc = 0;

	if (g_module_name) {
		rc = spdk_accel_get_opc_module_name(g_workload_selection, &module_name);
		if (rc != 0 || strcmp(g_module_name, module_name) != 0) {
			fprintf(stderr, "Module '%s' was assigned via JSON config or RPC, instead of '%s'\n",
				module_name ? module_name : "(unknown)", g_module_name);
			fprintf(stderr, "-M option is not compatible with accel_assign_opc RPC\n");
			rc = -EINVAL;
			goto error_end;
		}
	}

	if (g_workload_selection != SPDK_ACCEL_OPC_COMPRESS &&
	    g_workload_selection != SPDK_ACCEL_OPC_DECOMPRESS) {
		accel_perf_start(arg1);
		return;
	}

	if (g_cd_file_in_name == NULL) {
		fprintf(stdout, "A filename is required.\n");
		rc = -EINVAL;
		goto error_end;
	}

	if (g_workload_selection == SPDK_ACCEL_OPC_COMPRESS && g_verify) {
		fprintf(stdout, "\nCompression does not support the verify option, aborting.\n");
		rc = -ENOTSUP;
		goto error_end;
	}

	printf("Preparing input file...\n");

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		rc = -ENOMEM;
		goto error_end;
	}

	ctx->file = fopen(g_cd_file_in_name, "r");
	if (ctx->file == NULL) {
		fprintf(stderr, "Could not open file %s.\n", g_cd_file_in_name);
		rc = -errno;
		goto error_ctx;
	}

	fseek(ctx->file, 0L, SEEK_END);
	ctx->remaining = ftell(ctx->file);
	fseek(ctx->file, 0L, SEEK_SET);

	ctx->ch = spdk_accel_get_io_channel();
	if (ctx->ch == NULL) {
		rc = -EAGAIN;
		goto error_file;
	}

	if (g_xfer_size_bytes == 0) {
		/* size of 0 means "file at a time" */
		g_xfer_size_bytes = ctx->remaining;
	}

	accel_perf_prep_process_seg(ctx);
	return;

error_file:
	fclose(ctx->file);
error_ctx:
	free(ctx);
error_end:
	spdk_app_stop(rc);
}

static void
worker_shutdown(void *ctx)
{
	_worker_stop(ctx);
}

static void
shutdown_cb(void)
{
	struct worker_thread *worker;

	pthread_mutex_lock(&g_workers_lock);
	if (!g_workers) {
		spdk_app_stop(1);
		goto unlock;
	}

	worker = g_workers;
	while (worker) {
		spdk_thread_send_msg(worker->thread, worker_shutdown, worker);
		worker = worker->next;
	}
unlock:
	pthread_mutex_unlock(&g_workers_lock);
}

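/*
 * Example invocation (values are illustrative, not defaults):
 *   ./accel_perf -w crc32c -q 64 -o 4096 -t 10 -y
 */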
int
main(int argc, char **argv)
{
	struct worker_thread *worker, *tmp;
	int rc;

	pthread_mutex_init(&g_workers_lock, NULL);
	spdk_app_opts_init(&g_opts, sizeof(g_opts));
	g_opts.name = "accel_perf";
	g_opts.reactor_mask = "0x1";
	g_opts.shutdown_cb = shutdown_cb;

	rc = spdk_app_parse_args(argc, argv, &g_opts, "a:C:o:q:t:yw:M:P:f:T:l:S:x:", NULL,
				 parse_args, usage);
	if (rc != SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc == SPDK_APP_PARSE_ARGS_HELP ? 0 : 1;
	}

	if (g_workload_selection == SPDK_ACCEL_OPC_LAST) {
		fprintf(stderr, "Must provide a workload type\n");
		usage();
		return -1;
	}

	if (g_allocate_depth > 0 && g_queue_depth > g_allocate_depth) {
		fprintf(stdout, "allocate depth must be at least as big as queue depth\n");
		usage();
		return -1;
	}

	if (g_allocate_depth == 0) {
		g_allocate_depth = g_queue_depth;
	}

	if ((g_workload_selection == SPDK_ACCEL_OPC_CRC32C ||
	     g_workload_selection == SPDK_ACCEL_OPC_COPY_CRC32C ||
	     g_workload_selection == SPDK_ACCEL_OPC_DIF_VERIFY ||
	     g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE) &&
	    g_chained_count == 0) {
		usage();
		return -1;
	}

	if (g_workload_selection == SPDK_ACCEL_OPC_XOR && g_xor_src_count < 2) {
		usage();
		return -1;
	}

	if (g_module_name && spdk_accel_assign_opc(g_workload_selection, g_module_name)) {
		fprintf(stderr, "Was not able to assign '%s' module to the workload\n", g_module_name);
		usage();
		return -1;
	}

	g_rc = spdk_app_start(&g_opts, accel_perf_prep, NULL);
	if (g_rc) {
		SPDK_ERRLOG("ERROR starting application\n");
	}

	pthread_mutex_destroy(&g_workers_lock);

	worker = g_workers;
	while (worker) {
		tmp = worker->next;
		free(worker);
		worker = tmp;
	}
	accel_perf_free_compress_segs();
	spdk_app_fini();
	return g_rc;
}