/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2020 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/thread.h"
#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/log.h"
#include "spdk/string.h"
#include "spdk/accel.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/xor.h"
#include "spdk/dif.h"

#define DATA_PATTERN 0x5a
#define ALIGN_4K 0x1000
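/* Pad factor for compress destination buffers: 10% headroom for incompressible data. */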
#define COMP_BUF_PAD_PERCENTAGE 1.1L

static uint64_t	g_tsc_rate;
static uint64_t g_tsc_end;
static int g_rc;
static int g_xfer_size_bytes = 4096;
static int g_block_size_bytes = 512;
static int g_md_size_bytes = 8;
static int g_queue_depth = 32;
/* g_allocate_depth indicates how many tasks we allocate per worker. It will
 * be at least as much as the queue depth.
 */
static int g_allocate_depth = 0;
static int g_threads_per_core = 1;
static int g_time_in_sec = 5;
static uint32_t g_crc32c_seed = 0;
static uint32_t g_chained_count = 1;
static int g_fail_percent_goal = 0;
static uint8_t g_fill_pattern = 255;
static uint32_t g_xor_src_count = 2;
static bool g_verify = false;
static const char *g_workload_type = NULL;
static enum spdk_accel_opcode g_workload_selection = SPDK_ACCEL_OPC_LAST;
static const char *g_module_name = NULL;
static struct worker_thread *g_workers = NULL;
static int g_num_workers = 0;
static char *g_cd_file_in_name = NULL;
static pthread_mutex_t g_workers_lock = PTHREAD_MUTEX_INITIALIZER;
static struct spdk_app_opts g_opts = {};

struct ap_compress_seg {
	void		*uncompressed_data;
	uint32_t	uncompressed_len;
	struct iovec	*uncompressed_iovs;
	uint32_t	uncompressed_iovcnt;

	void		*compressed_data;
	uint32_t	compressed_len;
	uint32_t	compressed_len_padded;
	struct iovec	*compressed_iovs;
	uint32_t	compressed_iovcnt;

	STAILQ_ENTRY(ap_compress_seg)	link;
};

static STAILQ_HEAD(, ap_compress_seg) g_compress_segs = STAILQ_HEAD_INITIALIZER(g_compress_segs);

struct worker_thread;
static void accel_done(void *ref, int status);

struct display_info {
	int core;
	int thread;
};

struct ap_task {
	void			*src;
	struct iovec		*src_iovs;
	uint32_t		src_iovcnt;
	void			**sources;
	struct iovec		*dst_iovs;
	uint32_t		dst_iovcnt;
	void			*dst;
	void			*dst2;
	uint32_t		*crc_dst;
	uint32_t		compressed_sz;
	struct ap_compress_seg *cur_seg;
	struct worker_thread	*worker;
	int			expected_status; /* used for the compare operation */
	uint32_t		num_blocks; /* used for the DIF related operations */
	struct spdk_dif_ctx	dif_ctx;
	struct spdk_dif_error	dif_err;
	TAILQ_ENTRY(ap_task)	link;
};

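/* Per-thread worker state: accel channel, task pool, opcode stats, and the pollers
 * used to stop and drain the run.
 */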
struct worker_thread {
	struct spdk_io_channel		*ch;
	struct spdk_accel_opcode_stats	stats;
	uint64_t			xfer_failed;
	uint64_t			injected_miscompares;
	uint64_t			current_queue_depth;
	TAILQ_HEAD(, ap_task)		tasks_pool;
	struct worker_thread		*next;
	unsigned			core;
	struct spdk_thread		*thread;
	bool				is_draining;
	struct spdk_poller		*is_draining_poller;
	struct spdk_poller		*stop_poller;
	void				*task_base;
	struct display_info		display;
	enum spdk_accel_opcode		workload;
};

static void
dump_user_config(void)
{
	const char *module_name = NULL;
	int rc;

	rc = spdk_accel_get_opc_module_name(g_workload_selection, &module_name);
	if (rc) {
		printf("error getting module name (%d)\n", rc);
	}

	printf("\nSPDK Configuration:\n");
	printf("Core mask:      %s\n\n", g_opts.reactor_mask);
	printf("Accel Perf Configuration:\n");
	printf("Workload Type:  %s\n", g_workload_type);
	if (g_workload_selection == SPDK_ACCEL_OPC_CRC32C ||
	    g_workload_selection == SPDK_ACCEL_OPC_COPY_CRC32C) {
		printf("CRC-32C seed:   %u\n", g_crc32c_seed);
	} else if (g_workload_selection == SPDK_ACCEL_OPC_FILL) {
		printf("Fill pattern:   0x%x\n", g_fill_pattern);
	} else if ((g_workload_selection == SPDK_ACCEL_OPC_COMPARE) && g_fail_percent_goal > 0) {
		printf("Failure inject: %u percent\n", g_fail_percent_goal);
	} else if (g_workload_selection == SPDK_ACCEL_OPC_XOR) {
		printf("Source buffers: %u\n", g_xor_src_count);
	}
	if (g_workload_selection == SPDK_ACCEL_OPC_COPY_CRC32C ||
	    g_workload_selection == SPDK_ACCEL_OPC_DIF_VERIFY ||
	    g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE ||
	    g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE_COPY) {
		printf("Vector size:    %u bytes\n", g_xfer_size_bytes);
		printf("Transfer size:  %u bytes\n", g_xfer_size_bytes * g_chained_count);
	} else {
		printf("Transfer size:  %u bytes\n", g_xfer_size_bytes);
	}
	if (g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE ||
	    g_workload_selection == SPDK_ACCEL_OPC_DIF_VERIFY) {
		printf("Block size:     %u bytes\n", g_block_size_bytes);
		printf("Metadata size:  %u bytes\n", g_md_size_bytes);
	}
	printf("Vector count:   %u\n", g_chained_count);
	printf("Module:         %s\n", module_name);
	if (g_workload_selection == SPDK_ACCEL_OPC_COMPRESS ||
	    g_workload_selection == SPDK_ACCEL_OPC_DECOMPRESS) {
		printf("File Name:      %s\n", g_cd_file_in_name);
	}
	printf("Queue depth:    %u\n", g_queue_depth);
	printf("Allocate depth: %u\n", g_allocate_depth);
	printf("# threads/core: %u\n", g_threads_per_core);
	printf("Run time:       %u seconds\n", g_time_in_sec);
	printf("Verify:         %s\n\n", g_verify ? "Yes" : "No");
}

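/* Example invocation (illustrative values; the binary path assumes the default SPDK build layout):
 *   ./build/examples/accel_perf -w crc32c -q 64 -C 2 -t 10 -y
 */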
static void
usage(void)
{
	printf("accel_perf options:\n");
	printf("\t[-h help message]\n");
	printf("\t[-q queue depth per core]\n");
	printf("\t[-C for supported workloads, use this value to configure the io vector size to test (default 1)]\n");
	printf("\t[-T number of threads per core]\n");
	printf("\t[-o transfer size in bytes (default: 4KiB. For compress/decompress, 0 means the input file size)]\n");
	printf("\t[-t time in seconds]\n");
	printf("\t[-w workload type must be one of these: copy, fill, crc32c, copy_crc32c, compare, compress, decompress, dualcast, xor,\n");
	printf("\t                                        dif_verify, dif_generate, dif_generate_copy]\n");
	printf("\t[-M assign module to the operation, not compatible with accel_assign_opc RPC]\n");
	printf("\t[-l for compress/decompress workloads, name of uncompressed input file]\n");
	printf("\t[-S for crc32c workload, use this seed value (default 0)]\n");
	printf("\t[-P for compare workload, percentage of operations that should miscompare (percent, default 0)]\n");
	printf("\t[-f for fill workload, use this BYTE value (default 255)]\n");
	printf("\t[-x for xor workload, use this number of source buffers (default, minimum: 2)]\n");
	printf("\t[-y verify result if this switch is on]\n");
	printf("\t[-a tasks to allocate per core (default: same value as -q)]\n");
	printf("\t\tCan be used to spread operations across a wider range of memory.\n");
}

static int
parse_args(int ch, char *arg)
{
	int argval = 0;

	switch (ch) {
	case 'a':
	case 'C':
	case 'f':
	case 'T':
	case 'o':
	case 'P':
	case 'q':
	case 'S':
	case 't':
	case 'x':
		argval = spdk_strtol(optarg, 10);
		if (argval < 0) {
			fprintf(stderr, "-%c option must be non-negative.\n", ch);
			usage();
			return 1;
		}
		break;
	default:
		break;
	}

	switch (ch) {
	case 'a':
		g_allocate_depth = argval;
		break;
	case 'C':
		g_chained_count = argval;
		break;
	case 'l':
		g_cd_file_in_name = optarg;
		break;
	case 'f':
		g_fill_pattern = (uint8_t)argval;
		break;
	case 'T':
		g_threads_per_core = argval;
		break;
	case 'o':
		g_xfer_size_bytes = argval;
		break;
	case 'P':
		g_fail_percent_goal = argval;
		break;
	case 'q':
		g_queue_depth = argval;
		break;
	case 'S':
		g_crc32c_seed = argval;
		break;
	case 't':
		g_time_in_sec = argval;
		break;
	case 'x':
		g_xor_src_count = argval;
		break;
	case 'y':
		g_verify = true;
		break;
	case 'w':
		g_workload_type = optarg;
		if (!strcmp(g_workload_type, "copy")) {
			g_workload_selection = SPDK_ACCEL_OPC_COPY;
		} else if (!strcmp(g_workload_type, "fill")) {
			g_workload_selection = SPDK_ACCEL_OPC_FILL;
		} else if (!strcmp(g_workload_type, "crc32c")) {
			g_workload_selection = SPDK_ACCEL_OPC_CRC32C;
		} else if (!strcmp(g_workload_type, "copy_crc32c")) {
			g_workload_selection = SPDK_ACCEL_OPC_COPY_CRC32C;
		} else if (!strcmp(g_workload_type, "compare")) {
			g_workload_selection = SPDK_ACCEL_OPC_COMPARE;
		} else if (!strcmp(g_workload_type, "dualcast")) {
			g_workload_selection = SPDK_ACCEL_OPC_DUALCAST;
		} else if (!strcmp(g_workload_type, "compress")) {
			g_workload_selection = SPDK_ACCEL_OPC_COMPRESS;
		} else if (!strcmp(g_workload_type, "decompress")) {
			g_workload_selection = SPDK_ACCEL_OPC_DECOMPRESS;
		} else if (!strcmp(g_workload_type, "xor")) {
			g_workload_selection = SPDK_ACCEL_OPC_XOR;
		} else if (!strcmp(g_workload_type, "dif_verify")) {
			g_workload_selection = SPDK_ACCEL_OPC_DIF_VERIFY;
		} else if (!strcmp(g_workload_type, "dif_generate")) {
			g_workload_selection = SPDK_ACCEL_OPC_DIF_GENERATE;
		} else if (!strcmp(g_workload_type, "dif_generate_copy")) {
			g_workload_selection = SPDK_ACCEL_OPC_DIF_GENERATE_COPY;
		} else {
			fprintf(stderr, "Unsupported workload type: %s\n", optarg);
			usage();
			return 1;
		}
		break;
	case 'M':
		g_module_name = optarg;
		break;

	default:
		usage();
		return 1;
	}

	return 0;
}

static int dump_result(void);
static void
unregister_worker(void *arg1)
{
	struct worker_thread *worker = arg1;

	if (worker->ch) {
		spdk_accel_get_opcode_stats(worker->ch, worker->workload,
					    &worker->stats, sizeof(worker->stats));
		spdk_put_io_channel(worker->ch);
		worker->ch = NULL;
	}
	free(worker->task_base);
	spdk_thread_exit(spdk_get_thread());
	pthread_mutex_lock(&g_workers_lock);
	assert(g_num_workers >= 1);
	if (--g_num_workers == 0) {
		pthread_mutex_unlock(&g_workers_lock);
		/* Only dump results on successful runs */
		if (g_rc == 0) {
			g_rc = dump_result();
		}
		spdk_app_stop(g_rc);
	} else {
		pthread_mutex_unlock(&g_workers_lock);
	}
}

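/* Split a buffer of sz bytes into iovcnt iovecs of roughly equal size. */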
static void
accel_perf_construct_iovs(void *buf, uint64_t sz, struct iovec *iovs, uint32_t iovcnt)
{
	uint64_t ele_size;
	uint8_t *data;
	uint32_t i;

	ele_size = spdk_divide_round_up(sz, iovcnt);

	data = buf;
	for (i = 0; i < iovcnt; i++) {
		ele_size = spdk_min(ele_size, sz);
		assert(ele_size > 0);

		iovs[i].iov_base = data;
		iovs[i].iov_len = ele_size;

		data += ele_size;
		sz -= ele_size;
	}
	assert(sz == 0);
}

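/* Allocate and initialize the buffers (and DIF context, where applicable) a task
 * needs for the selected workload.
 */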
static int
_get_task_data_bufs(struct ap_task *task)
{
	uint32_t align = 0;
	uint32_t i = 0;
	int src_buff_len = g_xfer_size_bytes;
	int dst_buff_len = g_xfer_size_bytes;
	struct spdk_dif_ctx_init_ext_opts dif_opts;
	uint32_t num_blocks, transfer_size_with_md;
	int rc;

	/* For dualcast, the DSA HW requires 4K alignment on destination addresses but
	 * we do this for all modules to keep it simple.
	 */
	if (g_workload_selection == SPDK_ACCEL_OPC_DUALCAST) {
		align = ALIGN_4K;
	}

	if (g_workload_selection == SPDK_ACCEL_OPC_COMPRESS ||
	    g_workload_selection == SPDK_ACCEL_OPC_DECOMPRESS) {
		task->cur_seg = STAILQ_FIRST(&g_compress_segs);

		if (g_workload_selection == SPDK_ACCEL_OPC_COMPRESS) {
			dst_buff_len = task->cur_seg->compressed_len_padded;
		}

		task->dst = spdk_dma_zmalloc(dst_buff_len, align, NULL);
		if (task->dst == NULL) {
			fprintf(stderr, "Unable to alloc dst buffer\n");
			return -ENOMEM;
		}

		task->dst_iovs = calloc(g_chained_count, sizeof(struct iovec));
		if (!task->dst_iovs) {
			fprintf(stderr, "cannot allocate task->dst_iovs for task=%p\n", task);
			return -ENOMEM;
		}
		task->dst_iovcnt = g_chained_count;
		accel_perf_construct_iovs(task->dst, dst_buff_len, task->dst_iovs, task->dst_iovcnt);

		return 0;
	}

	if (g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE_COPY) {
		task->dst_iovcnt = g_chained_count;
		task->dst_iovs = calloc(task->dst_iovcnt, sizeof(struct iovec));
		if (!task->dst_iovs) {
			fprintf(stderr, "cannot allocate task->dst_iovs for task=%p\n", task);
			return -ENOMEM;
		}

		num_blocks = g_xfer_size_bytes / g_block_size_bytes;
		/* Add bytes for each block for metadata */
		transfer_size_with_md = g_xfer_size_bytes + (num_blocks * g_md_size_bytes);
		task->num_blocks = num_blocks;

		for (i = 0; i < task->dst_iovcnt; i++) {
			task->dst_iovs[i].iov_base = spdk_dma_zmalloc(transfer_size_with_md, 0, NULL);
			if (task->dst_iovs[i].iov_base == NULL) {
				return -ENOMEM;
			}
			task->dst_iovs[i].iov_len = transfer_size_with_md;
		}

		dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
		dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;

		rc = spdk_dif_ctx_init(&task->dif_ctx,
				       g_block_size_bytes + g_md_size_bytes,
				       g_md_size_bytes, true, true,
				       SPDK_DIF_TYPE1,
				       SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
				       0x123, 0xFFFF, 0x234, 0, 0, &dif_opts);
		if (rc != 0) {
			fprintf(stderr, "Initialization of DIF context failed\n");
			return rc;
		}
	}

	if (g_workload_selection == SPDK_ACCEL_OPC_CRC32C ||
	    g_workload_selection == SPDK_ACCEL_OPC_COPY_CRC32C) {
		task->crc_dst = spdk_dma_zmalloc(sizeof(*task->crc_dst), 0, NULL);
		if (task->crc_dst == NULL) {
			fprintf(stderr, "Unable to alloc crc_dst buffer\n");
			return -ENOMEM;
		}
	}

	if (g_workload_selection == SPDK_ACCEL_OPC_CRC32C ||
	    g_workload_selection == SPDK_ACCEL_OPC_COPY_CRC32C ||
	    g_workload_selection == SPDK_ACCEL_OPC_DIF_VERIFY ||
	    g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE ||
	    g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE_COPY) {
		assert(g_chained_count > 0);
		task->src_iovcnt = g_chained_count;
		task->src_iovs = calloc(task->src_iovcnt, sizeof(struct iovec));
		if (!task->src_iovs) {
			fprintf(stderr, "cannot allocate task->src_iovs for task=%p\n", task);
			return -ENOMEM;
		}

		if (g_workload_selection == SPDK_ACCEL_OPC_COPY_CRC32C) {
			dst_buff_len = g_xfer_size_bytes * g_chained_count;
		}

		if (g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE ||
		    g_workload_selection == SPDK_ACCEL_OPC_DIF_VERIFY) {
			src_buff_len += (g_xfer_size_bytes / g_block_size_bytes) * g_md_size_bytes;
		}

		for (i = 0; i < task->src_iovcnt; i++) {
			task->src_iovs[i].iov_base = spdk_dma_zmalloc(src_buff_len, 0, NULL);
			if (task->src_iovs[i].iov_base == NULL) {
				return -ENOMEM;
			}
			memset(task->src_iovs[i].iov_base, DATA_PATTERN, src_buff_len);
			task->src_iovs[i].iov_len = src_buff_len;
		}
	} else if (g_workload_selection == SPDK_ACCEL_OPC_XOR) {
		assert(g_xor_src_count > 1);
		task->sources = calloc(g_xor_src_count, sizeof(*task->sources));
		if (!task->sources) {
			return -ENOMEM;
		}

		for (i = 0; i < g_xor_src_count; i++) {
			task->sources[i] = spdk_dma_zmalloc(g_xfer_size_bytes, 0, NULL);
			if (!task->sources[i]) {
				return -ENOMEM;
			}
			memset(task->sources[i], DATA_PATTERN, g_xfer_size_bytes);
		}
	} else {
		task->src = spdk_dma_zmalloc(g_xfer_size_bytes, 0, NULL);
		if (task->src == NULL) {
			fprintf(stderr, "Unable to alloc src buffer\n");
			return -ENOMEM;
		}

		/* For fill, set the entire src buffer to the fill pattern so the result can be checked when verify is enabled. */
		if (g_workload_selection == SPDK_ACCEL_OPC_FILL) {
			memset(task->src, g_fill_pattern, g_xfer_size_bytes);
		} else {
			memset(task->src, DATA_PATTERN, g_xfer_size_bytes);
		}
	}

	if (g_workload_selection != SPDK_ACCEL_OPC_CRC32C &&
	    g_workload_selection != SPDK_ACCEL_OPC_DIF_VERIFY &&
	    g_workload_selection != SPDK_ACCEL_OPC_DIF_GENERATE &&
	    g_workload_selection != SPDK_ACCEL_OPC_DIF_GENERATE_COPY) {
		task->dst = spdk_dma_zmalloc(dst_buff_len, align, NULL);
		if (task->dst == NULL) {
			fprintf(stderr, "Unable to alloc dst buffer\n");
			return -ENOMEM;
		}

		/* For compare we want the buffers to match, otherwise not. */
		if (g_workload_selection == SPDK_ACCEL_OPC_COMPARE) {
			memset(task->dst, DATA_PATTERN, dst_buff_len);
		} else {
			memset(task->dst, ~DATA_PATTERN, dst_buff_len);
		}
	}

	/* Dualcast needs a second destination buffer; XOR with verify uses dst2 to hold the reference result. */
	if (g_workload_selection == SPDK_ACCEL_OPC_DUALCAST ||
	    (g_workload_selection == SPDK_ACCEL_OPC_XOR && g_verify)) {
		task->dst2 = spdk_dma_zmalloc(g_xfer_size_bytes, align, NULL);
		if (task->dst2 == NULL) {
			fprintf(stderr, "Unable to alloc dst buffer\n");
			return -ENOMEM;
		}
		memset(task->dst2, ~DATA_PATTERN, g_xfer_size_bytes);
	}

	if (g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE ||
	    g_workload_selection == SPDK_ACCEL_OPC_DIF_VERIFY) {
		dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
		dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;

		task->num_blocks = (g_xfer_size_bytes * g_chained_count) / g_block_size_bytes;

		rc = spdk_dif_ctx_init(&task->dif_ctx,
				       g_block_size_bytes + g_md_size_bytes,
				       g_md_size_bytes, true, true,
				       SPDK_DIF_TYPE1,
				       SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_APPTAG_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
				       16, 0xFFFF, 10, 0, 0, &dif_opts);
		if (rc != 0) {
			fprintf(stderr, "Initialization of DIF context failed, error (%d)\n", rc);
			return rc;
		}

		if (g_workload_selection == SPDK_ACCEL_OPC_DIF_VERIFY) {
			rc = spdk_dif_generate(task->src_iovs, task->src_iovcnt, task->num_blocks, &task->dif_ctx);
			if (rc != 0) {
				fprintf(stderr, "Generation of DIF failed, error (%d)\n", rc);
				return rc;
			}
		}
	}

	return 0;
}

static inline struct ap_task *
_get_task(struct worker_thread *worker)
{
	struct ap_task *task;

	if (!TAILQ_EMPTY(&worker->tasks_pool)) {
		task = TAILQ_FIRST(&worker->tasks_pool);
		TAILQ_REMOVE(&worker->tasks_pool, task, link);
	} else {
		fprintf(stderr, "Unable to get ap_task\n");
		return NULL;
	}

	return task;
}

/* Submit one operation using the same ap task that just completed. */
static void
_submit_single(struct worker_thread *worker, struct ap_task *task)
{
	int random_num;
	int rc = 0;

	assert(worker);

	switch (worker->workload) {
	case SPDK_ACCEL_OPC_COPY:
		rc = spdk_accel_submit_copy(worker->ch, task->dst, task->src,
					    g_xfer_size_bytes, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_FILL:
		/* For fill, use the first byte of the task->src buffer as the fill value */
		rc = spdk_accel_submit_fill(worker->ch, task->dst, *(uint8_t *)task->src,
					    g_xfer_size_bytes, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_CRC32C:
		rc = spdk_accel_submit_crc32cv(worker->ch, task->crc_dst,
					       task->src_iovs, task->src_iovcnt, g_crc32c_seed,
					       accel_done, task);
		break;
	case SPDK_ACCEL_OPC_COPY_CRC32C:
		rc = spdk_accel_submit_copy_crc32cv(worker->ch, task->dst, task->src_iovs, task->src_iovcnt,
						    task->crc_dst, g_crc32c_seed, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_COMPARE:
		random_num = rand() % 100;
		if (random_num < g_fail_percent_goal) {
			task->expected_status = -EILSEQ;
			*(uint8_t *)task->dst = ~DATA_PATTERN;
		} else {
			task->expected_status = 0;
			*(uint8_t *)task->dst = DATA_PATTERN;
		}
		rc = spdk_accel_submit_compare(worker->ch, task->dst, task->src,
					       g_xfer_size_bytes, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_DUALCAST:
		rc = spdk_accel_submit_dualcast(worker->ch, task->dst, task->dst2,
						task->src, g_xfer_size_bytes, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_COMPRESS:
		task->src_iovs = task->cur_seg->uncompressed_iovs;
		task->src_iovcnt = task->cur_seg->uncompressed_iovcnt;
		rc = spdk_accel_submit_compress(worker->ch, task->dst, task->cur_seg->compressed_len_padded,
						task->src_iovs,
						task->src_iovcnt, &task->compressed_sz, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_DECOMPRESS:
		task->src_iovs = task->cur_seg->compressed_iovs;
		task->src_iovcnt = task->cur_seg->compressed_iovcnt;
		rc = spdk_accel_submit_decompress(worker->ch, task->dst_iovs, task->dst_iovcnt, task->src_iovs,
						  task->src_iovcnt, NULL, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_XOR:
		rc = spdk_accel_submit_xor(worker->ch, task->dst, task->sources, g_xor_src_count,
					   g_xfer_size_bytes, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_DIF_VERIFY:
		rc = spdk_accel_submit_dif_verify(worker->ch, task->src_iovs, task->src_iovcnt, task->num_blocks,
						  &task->dif_ctx, &task->dif_err, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_DIF_GENERATE:
		rc = spdk_accel_submit_dif_generate(worker->ch, task->src_iovs, task->src_iovcnt, task->num_blocks,
						    &task->dif_ctx, accel_done, task);
		break;
	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
		rc = spdk_accel_submit_dif_generate_copy(worker->ch, task->dst_iovs, task->dst_iovcnt,
				task->src_iovs, task->src_iovcnt,
				task->num_blocks, &task->dif_ctx, accel_done, task);
		break;
	default:
		assert(false);
		break;
	}

	worker->current_queue_depth++;
	if (rc) {
		accel_done(task, rc);
	}
}

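/* Release whatever buffers _get_task_data_bufs() allocated for the selected workload. */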
static void
_free_task_buffers(struct ap_task *task)
{
	uint32_t i;

	if (g_workload_selection == SPDK_ACCEL_OPC_DECOMPRESS ||
	    g_workload_selection == SPDK_ACCEL_OPC_COMPRESS) {
		free(task->dst_iovs);
	} else if (g_workload_selection == SPDK_ACCEL_OPC_CRC32C ||
		   g_workload_selection == SPDK_ACCEL_OPC_COPY_CRC32C ||
		   g_workload_selection == SPDK_ACCEL_OPC_DIF_VERIFY ||
		   g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE ||
		   g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE_COPY) {
		if (task->crc_dst) {
			spdk_dma_free(task->crc_dst);
		}
		if (task->src_iovs) {
			for (i = 0; i < task->src_iovcnt; i++) {
				if (task->src_iovs[i].iov_base) {
					spdk_dma_free(task->src_iovs[i].iov_base);
				}
			}
			free(task->src_iovs);
		}
	} else if (g_workload_selection == SPDK_ACCEL_OPC_XOR) {
		if (task->sources) {
			for (i = 0; i < g_xor_src_count; i++) {
				spdk_dma_free(task->sources[i]);
			}
			free(task->sources);
		}
	} else {
		spdk_dma_free(task->src);
	}

	spdk_dma_free(task->dst);
	if (g_workload_selection == SPDK_ACCEL_OPC_DUALCAST || g_workload_selection == SPDK_ACCEL_OPC_XOR) {
		spdk_dma_free(task->dst2);
	}

	if (g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE_COPY) {
		if (task->dst_iovs) {
			for (i = 0; i < task->dst_iovcnt; i++) {
				if (task->dst_iovs[i].iov_base) {
					spdk_dma_free(task->dst_iovs[i].iov_base);
				}
			}
			free(task->dst_iovs);
		}
	}
}

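/* Compare a flat buffer against the concatenated contents of an iovec array. */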
static int
_vector_memcmp(void *_dst, struct iovec *src_iovs, uint32_t iovcnt)
{
	uint32_t i;
	uint32_t ttl_len = 0;
	uint8_t *dst = (uint8_t *)_dst;

	for (i = 0; i < iovcnt; i++) {
		if (memcmp(dst, src_iovs[i].iov_base, src_iovs[i].iov_len)) {
			return -1;
		}
		dst += src_iovs[i].iov_len;
		ttl_len += src_iovs[i].iov_len;
	}

	if (ttl_len != iovcnt * g_xfer_size_bytes) {
		return -1;
	}

	return 0;
}

static int _worker_stop(void *arg);

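/* Completion callback: optionally verify the result in software, then reuse the task
 * for the next submission unless the worker is draining.
 */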
static void
accel_done(void *arg1, int status)
{
	struct ap_task *task = arg1;
	struct worker_thread *worker = task->worker;
	uint32_t sw_crc32c;
	struct spdk_dif_error err_blk;

	assert(worker);
	assert(worker->current_queue_depth > 0);

	if (g_verify && status == 0) {
		switch (worker->workload) {
		case SPDK_ACCEL_OPC_COPY_CRC32C:
			sw_crc32c = spdk_crc32c_iov_update(task->src_iovs, task->src_iovcnt, ~g_crc32c_seed);
			if (*task->crc_dst != sw_crc32c) {
				SPDK_NOTICELOG("CRC-32C miscompare\n");
				worker->xfer_failed++;
			}
			if (_vector_memcmp(task->dst, task->src_iovs, task->src_iovcnt)) {
				SPDK_NOTICELOG("Data miscompare\n");
				worker->xfer_failed++;
			}
			break;
		case SPDK_ACCEL_OPC_CRC32C:
			sw_crc32c = spdk_crc32c_iov_update(task->src_iovs, task->src_iovcnt, ~g_crc32c_seed);
			if (*task->crc_dst != sw_crc32c) {
				SPDK_NOTICELOG("CRC-32C miscompare\n");
				worker->xfer_failed++;
			}
			break;
		case SPDK_ACCEL_OPC_COPY:
			if (memcmp(task->src, task->dst, g_xfer_size_bytes)) {
				SPDK_NOTICELOG("Data miscompare\n");
				worker->xfer_failed++;
			}
			break;
		case SPDK_ACCEL_OPC_DUALCAST:
			if (memcmp(task->src, task->dst, g_xfer_size_bytes)) {
				SPDK_NOTICELOG("Data miscompare, first destination\n");
				worker->xfer_failed++;
			}
			if (memcmp(task->src, task->dst2, g_xfer_size_bytes)) {
				SPDK_NOTICELOG("Data miscompare, second destination\n");
				worker->xfer_failed++;
			}
			break;
		case SPDK_ACCEL_OPC_FILL:
			if (memcmp(task->dst, task->src, g_xfer_size_bytes)) {
				SPDK_NOTICELOG("Data miscompare\n");
				worker->xfer_failed++;
			}
			break;
		case SPDK_ACCEL_OPC_COMPARE:
			break;
		case SPDK_ACCEL_OPC_COMPRESS:
			break;
		case SPDK_ACCEL_OPC_DECOMPRESS:
			if (memcmp(task->dst, task->cur_seg->uncompressed_data, task->cur_seg->uncompressed_len)) {
				SPDK_NOTICELOG("Data miscompare on decompression\n");
				worker->xfer_failed++;
			}
			break;
		case SPDK_ACCEL_OPC_XOR:
			if (spdk_xor_gen(task->dst2, task->sources, g_xor_src_count,
					 g_xfer_size_bytes) != 0) {
				SPDK_ERRLOG("Failed to generate xor for verification\n");
			} else if (memcmp(task->dst, task->dst2, g_xfer_size_bytes)) {
				SPDK_NOTICELOG("Data miscompare\n");
				worker->xfer_failed++;
			}
			break;
		case SPDK_ACCEL_OPC_DIF_VERIFY:
			break;
		case SPDK_ACCEL_OPC_DIF_GENERATE:
			if (spdk_dif_verify(task->src_iovs, task->src_iovcnt, task->num_blocks,
					    &task->dif_ctx, &err_blk) != 0) {
				SPDK_NOTICELOG("Data miscompare, "
					       "err_type %u, expected %lu, actual %lu, err_offset %u\n",
					       err_blk.err_type, err_blk.expected,
					       err_blk.actual, err_blk.err_offset);
				worker->xfer_failed++;
			}
			break;
		case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
			if (spdk_dif_verify(task->dst_iovs, task->dst_iovcnt, task->num_blocks,
					    &task->dif_ctx, &err_blk) != 0) {
				SPDK_NOTICELOG("Data miscompare, "
					       "err_type %u, expected %lu, actual %lu, err_offset %u\n",
					       err_blk.err_type, err_blk.expected,
					       err_blk.actual, err_blk.err_offset);
				worker->xfer_failed++;
			}
			break;
		default:
			assert(false);
			break;
		}
	}

	if (worker->workload == SPDK_ACCEL_OPC_COMPRESS ||
	    worker->workload == SPDK_ACCEL_OPC_DECOMPRESS) {
		/* Advance the task to the next segment */
		task->cur_seg = STAILQ_NEXT(task->cur_seg, link);
		if (task->cur_seg == NULL) {
			task->cur_seg = STAILQ_FIRST(&g_compress_segs);
		}
	}

	if (task->expected_status == -EILSEQ) {
		assert(status != 0);
		worker->injected_miscompares++;
		status = 0;
	} else if (status) {
		/* Expected to pass but the accel module reported an error (ex: COMPARE operation). */
		worker->xfer_failed++;
	}

	worker->current_queue_depth--;

	if (!worker->is_draining && status == 0) {
		TAILQ_INSERT_TAIL(&worker->tasks_pool, task, link);
		task = _get_task(worker);
		_submit_single(worker, task);
	} else {
		TAILQ_INSERT_TAIL(&worker->tasks_pool, task, link);
	}
}

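/* Print per-worker and aggregate throughput; returns 1 if any transfer failed. */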
static int
dump_result(void)
{
	uint64_t total_completed = 0;
	uint64_t total_failed = 0;
	uint64_t total_miscompared = 0;
	uint64_t total_xfer_per_sec, total_bw_in_MiBps;
	struct worker_thread *worker = g_workers;
	char tmp[64];

	printf("\n%-12s %20s %16s %16s %16s\n",
	       "Core,Thread", "Transfers", "Bandwidth", "Failed", "Miscompares");
	printf("------------------------------------------------------------------------------------\n");
	while (worker != NULL) {

		uint64_t xfer_per_sec = worker->stats.executed / g_time_in_sec;
		uint64_t bw_in_MiBps = worker->stats.num_bytes /
				       (g_time_in_sec * 1024 * 1024);

		total_completed += worker->stats.executed;
		total_failed += worker->xfer_failed;
		total_miscompared += worker->injected_miscompares;

		snprintf(tmp, sizeof(tmp), "%u,%u", worker->display.core, worker->display.thread);
		if (xfer_per_sec) {
			printf("%-12s %18" PRIu64 "/s %10" PRIu64 " MiB/s %16" PRIu64 " %16" PRIu64 "\n",
			       tmp, xfer_per_sec, bw_in_MiBps, worker->xfer_failed,
			       worker->injected_miscompares);
		}

		worker = worker->next;
	}

	total_xfer_per_sec = total_completed / g_time_in_sec;
	total_bw_in_MiBps = (total_completed * g_xfer_size_bytes) /
			    (g_time_in_sec * 1024 * 1024);

	printf("====================================================================================\n");
	printf("%-12s %18" PRIu64 "/s %10" PRIu64 " MiB/s %16" PRIu64 " %16" PRIu64 "\n",
	       "Total", total_xfer_per_sec, total_bw_in_MiBps, total_failed, total_miscompared);

	return total_failed ? 1 : 0;
}

static inline void
_free_task_buffers_in_pool(struct worker_thread *worker)
{
	struct ap_task *task;

	assert(worker);
	while ((task = TAILQ_FIRST(&worker->tasks_pool))) {
		TAILQ_REMOVE(&worker->tasks_pool, task, link);
		_free_task_buffers(task);
	}
}

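/* Poller that waits for a worker's outstanding operations to complete before tearing it down. */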
static int
_check_draining(void *arg)
{
	struct worker_thread *worker = arg;

	assert(worker);

	if (worker->current_queue_depth == 0) {
		_free_task_buffers_in_pool(worker);
		spdk_poller_unregister(&worker->is_draining_poller);
		unregister_worker(worker);
	}

	return SPDK_POLLER_BUSY;
}

static int
_worker_stop(void *arg)
{
	struct worker_thread *worker = arg;

	assert(worker);

	spdk_poller_unregister(&worker->stop_poller);

	/* Now let the worker drain and check its outstanding I/O with a poller */
	worker->is_draining = true;
	worker->is_draining_poller = SPDK_POLLER_REGISTER(_check_draining, worker, 0);

	return SPDK_POLLER_BUSY;
}

static void shutdown_cb(void);

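/* Per SPDK-thread initialization: create the worker, allocate its task pool,
 * arm the stop poller, and prime queue-depth worth of operations.
 */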
static void
_init_thread(void *arg1)
{
	struct worker_thread *worker;
	struct ap_task *task;
	int i, num_tasks = g_allocate_depth;
	struct display_info *display = arg1;

	worker = calloc(1, sizeof(*worker));
	if (worker == NULL) {
		fprintf(stderr, "Unable to allocate worker\n");
		free(display);
		spdk_thread_exit(spdk_get_thread());
		goto no_worker;
	}

	worker->workload = g_workload_selection;
	worker->display.core = display->core;
	worker->display.thread = display->thread;
	free(display);
	worker->core = spdk_env_get_current_core();
	worker->thread = spdk_get_thread();
	pthread_mutex_lock(&g_workers_lock);
	g_num_workers++;
	worker->next = g_workers;
	g_workers = worker;
	pthread_mutex_unlock(&g_workers_lock);
	worker->ch = spdk_accel_get_io_channel();
	if (worker->ch == NULL) {
		fprintf(stderr, "Unable to get an accel channel\n");
		goto error;
	}

	TAILQ_INIT(&worker->tasks_pool);

	worker->task_base = calloc(num_tasks, sizeof(struct ap_task));
	if (worker->task_base == NULL) {
		fprintf(stderr, "Could not allocate task base.\n");
		goto error;
	}

	task = worker->task_base;
	for (i = 0; i < num_tasks; i++) {
		TAILQ_INSERT_TAIL(&worker->tasks_pool, task, link);
		task->worker = worker;
		if (_get_task_data_bufs(task)) {
			fprintf(stderr, "Unable to get data bufs\n");
			goto error;
		}
		task++;
	}

	/* Register a poller that will stop the worker when the test time has elapsed */
	worker->stop_poller = SPDK_POLLER_REGISTER(_worker_stop, worker,
			      g_time_in_sec * 1000000ULL);

	/* Load up queue depth worth of operations. */
	for (i = 0; i < g_queue_depth; i++) {
		task = _get_task(worker);
		if (task == NULL) {
			goto error;
		}

		_submit_single(worker, task);
	}
	return;
error:
	_free_task_buffers_in_pool(worker);
	free(worker->task_base);
no_worker:
	shutdown_cb();
	g_rc = -1;
}

static void
accel_perf_start(void *arg1)
{
	struct spdk_cpuset tmp_cpumask = {};
	char thread_name[32];
	uint32_t i;
	int j;
	struct spdk_thread *thread;
	struct display_info *display;

	g_tsc_rate = spdk_get_ticks_hz();
	g_tsc_end = spdk_get_ticks() + g_time_in_sec * g_tsc_rate;

	dump_user_config();

	printf("Running for %d seconds...\n", g_time_in_sec);
	fflush(stdout);

	/* Create worker threads for each core that was specified. */
	SPDK_ENV_FOREACH_CORE(i) {
		for (j = 0; j < g_threads_per_core; j++) {
			snprintf(thread_name, sizeof(thread_name), "ap_worker_%u_%u", i, j);
			spdk_cpuset_zero(&tmp_cpumask);
			spdk_cpuset_set_cpu(&tmp_cpumask, i, true);
			thread = spdk_thread_create(thread_name, &tmp_cpumask);
			display = calloc(1, sizeof(*display));
			if (display == NULL) {
				fprintf(stderr, "Unable to allocate memory\n");
				spdk_app_stop(-1);
				return;
			}
			display->core = i;
			display->thread = j;
			spdk_thread_send_msg(thread, _init_thread, display);
		}
	}
}

static void
accel_perf_free_compress_segs(void)
{
	struct ap_compress_seg *seg, *tmp;

	STAILQ_FOREACH_SAFE(seg, &g_compress_segs, link, tmp) {
		free(seg->uncompressed_iovs);
		free(seg->compressed_iovs);
		spdk_dma_free(seg->compressed_data);
		spdk_dma_free(seg->uncompressed_data);
		STAILQ_REMOVE_HEAD(&g_compress_segs, link);
		free(seg);
	}
}

struct accel_perf_prep_ctx {
	FILE			*file;
	long			remaining;
	struct spdk_io_channel	*ch;
	struct ap_compress_seg	*cur_seg;
};

static void accel_perf_prep_process_seg(struct accel_perf_prep_ctx *ctx);

static void
accel_perf_prep_process_seg_cpl(void *ref, int status)
{
	struct accel_perf_prep_ctx *ctx = ref;
	struct ap_compress_seg *seg;

	if (status != 0) {
		fprintf(stderr, "error (%d) on initial compress completion\n", status);
		spdk_dma_free(ctx->cur_seg->compressed_data);
		spdk_dma_free(ctx->cur_seg->uncompressed_data);
		free(ctx->cur_seg);
		spdk_put_io_channel(ctx->ch);
		fclose(ctx->file);
		free(ctx);
		spdk_app_stop(-status);
		return;
	}

	seg = ctx->cur_seg;

	if (g_workload_selection == SPDK_ACCEL_OPC_DECOMPRESS) {
		seg->compressed_iovs = calloc(g_chained_count, sizeof(struct iovec));
		if (seg->compressed_iovs == NULL) {
			fprintf(stderr, "unable to allocate iovec\n");
			spdk_dma_free(seg->compressed_data);
			spdk_dma_free(seg->uncompressed_data);
			free(seg);
			spdk_put_io_channel(ctx->ch);
			fclose(ctx->file);
			free(ctx);
			spdk_app_stop(-ENOMEM);
			return;
		}
		seg->compressed_iovcnt = g_chained_count;

		accel_perf_construct_iovs(seg->compressed_data, seg->compressed_len, seg->compressed_iovs,
					  seg->compressed_iovcnt);
	}

	STAILQ_INSERT_TAIL(&g_compress_segs, seg, link);
	ctx->remaining -= seg->uncompressed_len;

	accel_perf_prep_process_seg(ctx);
}

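/* Read the next chunk of the input file, compress it through the accel framework,
 * and queue it as a segment; once the whole file is consumed, start the test.
 */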
static void
accel_perf_prep_process_seg(struct accel_perf_prep_ctx *ctx)
{
	struct ap_compress_seg *seg;
	int sz, sz_read, sz_padded;
	void *ubuf, *cbuf;
	struct iovec iov[1];
	int rc;

	if (ctx->remaining == 0) {
		spdk_put_io_channel(ctx->ch);
		fclose(ctx->file);
		free(ctx);
		accel_perf_start(NULL);
		return;
	}

	sz = spdk_min(ctx->remaining, g_xfer_size_bytes);
	/* Add a 10% pad to the compress buffer for incompressible data. Note that a real app
	 * would likely deal with not having a large enough buffer either by submitting
	 * another operation with a larger one, or, like the vbdev module does, by accepting
	 * the error and using the data uncompressed, marking it as such in its own metadata
	 * so that in the future it doesn't try to decompress uncompressed data, etc.
	 */
	sz_padded = sz * COMP_BUF_PAD_PERCENTAGE;

	ubuf = spdk_dma_zmalloc(sz, ALIGN_4K, NULL);
	if (!ubuf) {
		fprintf(stderr, "unable to allocate uncompress buffer\n");
		rc = -ENOMEM;
		goto error;
	}

	cbuf = spdk_dma_malloc(sz_padded, ALIGN_4K, NULL);
	if (!cbuf) {
		fprintf(stderr, "unable to allocate compress buffer\n");
		rc = -ENOMEM;
		spdk_dma_free(ubuf);
		goto error;
	}

	seg = calloc(1, sizeof(*seg));
	if (!seg) {
		fprintf(stderr, "unable to allocate comp/decomp segment\n");
		spdk_dma_free(ubuf);
		spdk_dma_free(cbuf);
		rc = -ENOMEM;
		goto error;
	}

	sz_read = fread(ubuf, sizeof(uint8_t), sz, ctx->file);
	if (sz_read != sz) {
		fprintf(stderr, "unable to read input file\n");
		free(seg);
		spdk_dma_free(ubuf);
		spdk_dma_free(cbuf);
		rc = -errno;
		goto error;
	}

	if (g_workload_selection == SPDK_ACCEL_OPC_COMPRESS) {
		seg->uncompressed_iovs = calloc(g_chained_count, sizeof(struct iovec));
		if (seg->uncompressed_iovs == NULL) {
			fprintf(stderr, "unable to allocate iovec\n");
			free(seg);
			spdk_dma_free(ubuf);
			spdk_dma_free(cbuf);
			rc = -ENOMEM;
			goto error;
		}
		seg->uncompressed_iovcnt = g_chained_count;
		accel_perf_construct_iovs(ubuf, sz, seg->uncompressed_iovs, seg->uncompressed_iovcnt);
	}

	seg->uncompressed_data = ubuf;
	seg->uncompressed_len = sz;
	seg->compressed_data = cbuf;
	seg->compressed_len = sz;
	seg->compressed_len_padded = sz_padded;

	ctx->cur_seg = seg;
	iov[0].iov_base = seg->uncompressed_data;
	iov[0].iov_len = seg->uncompressed_len;
	/* Note that anytime a call is made to spdk_accel_submit_compress() there's a chance
	 * it will fail with -ENOMEM in the event that the destination buffer is not large enough
	 * to hold the compressed data.  This example app simply adds 10% buffer for compressed data
	 * but real applications may want to consider a more sophisticated method.
	 */
	rc = spdk_accel_submit_compress(ctx->ch, seg->compressed_data, seg->compressed_len_padded, iov, 1,
					&seg->compressed_len, accel_perf_prep_process_seg_cpl, ctx);
	if (rc < 0) {
		fprintf(stderr, "error (%d) on initial compress submission\n", rc);
		goto error;
	}

	return;

error:
	spdk_put_io_channel(ctx->ch);
	fclose(ctx->file);
	free(ctx);
	spdk_app_stop(rc);
}

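/* Application start callback: validate the module assignment and, for compress/decompress
 * workloads, pre-process the input file before starting.
 */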
static void
accel_perf_prep(void *arg1)
{
	struct accel_perf_prep_ctx *ctx;
	const char *module_name = NULL;
	int rc = 0;

	if (g_module_name) {
		rc = spdk_accel_get_opc_module_name(g_workload_selection, &module_name);
		if (rc != 0 || strcmp(g_module_name, module_name) != 0) {
			fprintf(stderr, "Module '%s' was assigned via JSON config or RPC, instead of '%s'\n",
				module_name, g_module_name);
			fprintf(stderr, "-M option is not compatible with accel_assign_opc RPC\n");
			rc = -EINVAL;
			goto error_end;
		}
	}

	if (g_workload_selection != SPDK_ACCEL_OPC_COMPRESS &&
	    g_workload_selection != SPDK_ACCEL_OPC_DECOMPRESS) {
		accel_perf_start(arg1);
		return;
	}

	if (g_cd_file_in_name == NULL) {
		fprintf(stdout, "A filename is required.\n");
		rc = -EINVAL;
		goto error_end;
	}

	if (g_workload_selection == SPDK_ACCEL_OPC_COMPRESS && g_verify) {
		fprintf(stdout, "\nCompression does not support the verify option, aborting.\n");
		rc = -ENOTSUP;
		goto error_end;
	}

	printf("Preparing input file...\n");

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		rc = -ENOMEM;
		goto error_end;
	}

	ctx->file = fopen(g_cd_file_in_name, "r");
	if (ctx->file == NULL) {
		fprintf(stderr, "Could not open file %s.\n", g_cd_file_in_name);
		rc = -errno;
		goto error_ctx;
	}

	fseek(ctx->file, 0L, SEEK_END);
	ctx->remaining = ftell(ctx->file);
	fseek(ctx->file, 0L, SEEK_SET);

	ctx->ch = spdk_accel_get_io_channel();
	if (ctx->ch == NULL) {
		rc = -EAGAIN;
		goto error_file;
	}

	if (g_xfer_size_bytes == 0) {
		/* A size of 0 means "the whole file at a time" */
		g_xfer_size_bytes = ctx->remaining;
	}

	accel_perf_prep_process_seg(ctx);
	return;

error_file:
	fclose(ctx->file);
error_ctx:
	free(ctx);
error_end:
	spdk_app_stop(rc);
}

static void
worker_shutdown(void *ctx)
{
	_worker_stop(ctx);
}

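/* Shutdown hook (e.g. SIGINT): ask every worker thread to stop and drain. */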
static void
shutdown_cb(void)
{
	struct worker_thread *worker;

	pthread_mutex_lock(&g_workers_lock);
	if (!g_workers) {
		spdk_app_stop(1);
		goto unlock;
	}

	worker = g_workers;
	while (worker) {
		spdk_thread_send_msg(worker->thread, worker_shutdown, worker);
		worker = worker->next;
	}
unlock:
	pthread_mutex_unlock(&g_workers_lock);
}

int
main(int argc, char **argv)
{
	struct worker_thread *worker, *tmp;
	int rc;

	pthread_mutex_init(&g_workers_lock, NULL);
	spdk_app_opts_init(&g_opts, sizeof(g_opts));
	g_opts.name = "accel_perf";
	g_opts.reactor_mask = "0x1";
	g_opts.shutdown_cb = shutdown_cb;
	g_opts.rpc_addr = NULL;

	rc = spdk_app_parse_args(argc, argv, &g_opts, "a:C:o:q:t:yw:M:P:f:T:l:S:x:", NULL,
				 parse_args, usage);
	if (rc != SPDK_APP_PARSE_ARGS_SUCCESS) {
		return rc == SPDK_APP_PARSE_ARGS_HELP ? 0 : 1;
	}

	if (g_workload_selection == SPDK_ACCEL_OPC_LAST) {
		fprintf(stderr, "Must provide a workload type\n");
		usage();
		return -1;
	}

	if (g_allocate_depth > 0 && g_queue_depth > g_allocate_depth) {
		fprintf(stdout, "allocate depth must be at least as big as queue depth\n");
		usage();
		return -1;
	}

	if (g_allocate_depth == 0) {
		g_allocate_depth = g_queue_depth;
	}

	if ((g_workload_selection == SPDK_ACCEL_OPC_CRC32C ||
	     g_workload_selection == SPDK_ACCEL_OPC_COPY_CRC32C ||
	     g_workload_selection == SPDK_ACCEL_OPC_DIF_VERIFY ||
	     g_workload_selection == SPDK_ACCEL_OPC_DIF_GENERATE) &&
	    g_chained_count == 0) {
		usage();
		return -1;
	}

	if (g_workload_selection == SPDK_ACCEL_OPC_XOR && g_xor_src_count < 2) {
		usage();
		return -1;
	}

	if (g_module_name && spdk_accel_assign_opc(g_workload_selection, g_module_name)) {
		fprintf(stderr, "Was not able to assign '%s' module to the workload\n", g_module_name);
		usage();
		return -1;
	}

	g_rc = spdk_app_start(&g_opts, accel_perf_prep, NULL);
	if (g_rc) {
		SPDK_ERRLOG("ERROR starting application\n");
	}

	pthread_mutex_destroy(&g_workers_lock);

	worker = g_workers;
	while (worker) {
		tmp = worker->next;
		free(worker);
		worker = tmp;
	}
	accel_perf_free_compress_segs();
	spdk_app_fini();
	return g_rc;
}