/* /spdk/examples/accel/perf/accel_perf.c (revision 88754353c0d0b652180c26d04d248eca3da83cfc) */
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk/thread.h"
#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/log.h"
#include "spdk/string.h"
#include "spdk/accel_engine.h"
#include "spdk/crc32.h"
#include "spdk/util.h"

#define DATA_PATTERN 0x5a
#define ALIGN_4K 0x1000

static uint64_t	g_tsc_rate;
static uint64_t g_tsc_us_rate;
static uint64_t g_tsc_end;
static int g_rc;
static int g_xfer_size_bytes = 4096;
static int g_queue_depth = 32;
static int g_ops_per_batch = 0;
static int g_threads_per_core = 1;
static int g_time_in_sec = 5;
static uint32_t g_crc32c_seed = 0;
static uint32_t g_crc32c_chained_count = 1;
static int g_fail_percent_goal = 0;
static uint8_t g_fill_pattern = 255;
static bool g_verify = false;
static const char *g_workload_type = NULL;
static enum accel_capability g_workload_selection;
static struct worker_thread *g_workers = NULL;
static int g_num_workers = 0;
static pthread_mutex_t g_workers_lock = PTHREAD_MUTEX_INITIALIZER;
uint64_t g_capabilites;

struct worker_thread;
static void accel_done(void *ref, int status);

struct display_info {
	int core;
	int thread;
};

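/* Per-operation context: source and destination buffers (or an iovec chain
 * for chained crc32c) plus status and ownership bookkeeping for one
 * outstanding accel operation.
 */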
struct ap_task {
	void			*src;
	struct iovec		*iovs;
	uint32_t		iov_cnt;
	void			*dst;
	void			*dst2;
	struct worker_thread	*worker;
	int			status;
	int			expected_status; /* used for the compare operation */
	TAILQ_ENTRY(ap_task)	link;
};

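/* One batch being built up or in flight; it is tracked on a per-worker list
 * that reflects its current state (in prep, ready to submit, or in use).
 */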
struct accel_batch {
	int				status;
	int				cmd_count;
	struct spdk_accel_batch		*batch;
	struct worker_thread		*worker;
	TAILQ_ENTRY(accel_batch)	link;
};

struct worker_thread {
	struct spdk_io_channel		*ch;
	uint64_t			xfer_completed;
	uint64_t			xfer_failed;
	uint64_t			injected_miscompares;
	uint64_t			current_queue_depth;
	TAILQ_HEAD(, ap_task)		tasks_pool;
	struct worker_thread		*next;
	unsigned			core;
	struct spdk_thread		*thread;
	bool				is_draining;
	struct spdk_poller		*is_draining_poller;
	struct spdk_poller		*stop_poller;
	void				*task_base;
	struct accel_batch		*batch_base;
	struct display_info		display;
	TAILQ_HEAD(, accel_batch)	in_prep_batches;
	TAILQ_HEAD(, accel_batch)	in_use_batches;
	TAILQ_HEAD(, accel_batch)	to_submit_batches;
};

static void
dump_user_config(struct spdk_app_opts *opts)
{
	printf("SPDK Configuration:\n");
	printf("Core mask:      %s\n\n", opts->reactor_mask);
	printf("Accel Perf Configuration:\n");
	printf("Workload Type:  %s\n", g_workload_type);
	if (g_workload_selection == ACCEL_CRC32C) {
		printf("CRC-32C seed:   %u\n", g_crc32c_seed);
		printf("vector size:    %u\n", g_crc32c_chained_count);
	} else if (g_workload_selection == ACCEL_FILL) {
		printf("Fill pattern:   0x%x\n", g_fill_pattern);
	} else if ((g_workload_selection == ACCEL_COMPARE) && g_fail_percent_goal > 0) {
		printf("Failure inject: %u percent\n", g_fail_percent_goal);
	}
	printf("Transfer size:  %u bytes\n", g_xfer_size_bytes);
	printf("Queue depth:    %u\n", g_queue_depth);
	printf("# threads/core: %u\n", g_threads_per_core);
	printf("Run time:       %u seconds\n", g_time_in_sec);
	if (g_ops_per_batch > 0) {
		printf("Batching:       %u operations\n", g_ops_per_batch);
	} else {
		printf("Batching:       Disabled\n");
	}
	printf("Verify:         %s\n\n", g_verify ? "Yes" : "No");
}

static void
usage(void)
{
	printf("accel_perf options:\n");
	printf("\t[-h help message]\n");
	printf("\t[-q queue depth per core]\n");
	printf("\t[-C for crc32c workload, use this value to configure the io vector size to test (default 1)]\n");
	printf("\t[-T number of threads per core]\n");
	printf("\t[-n number of channels]\n");
	printf("\t[-o transfer size in bytes]\n");
	printf("\t[-t time in seconds]\n");
	printf("\t[-w workload type must be one of these: copy, fill, crc32c, compare, dualcast]\n");
	printf("\t[-s for crc32c workload, use this seed value (default 0)]\n");
	printf("\t[-P for compare workload, percentage of operations that should miscompare (percent, default 0)]\n");
	printf("\t[-f for fill workload, use this BYTE value (default 255)]\n");
	printf("\t[-y verify result if this switch is on]\n");
	printf("\t[-b batch this number of operations at a time (default 0 = disabled)]\n");
}

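/*
 * Example invocation (illustrative only; the option values here are arbitrary):
 *
 *   accel_perf -q 64 -o 4096 -t 5 -w crc32c -C 3 -y
 *
 * runs a chained-buffer crc32c workload at queue depth 64 for 5 seconds and
 * verifies every result in software.
 */

/* Option callback invoked by spdk_app_parse_args() once per application
 * option.  Despite the parameter names below, the first argument carries the
 * option character and the option value is read from optarg.
 */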
static int
parse_args(int argc, char *argv)
{
	switch (argc) {
	case 'b':
		g_ops_per_batch = spdk_strtol(optarg, 10);
		break;
	case 'C':
		g_crc32c_chained_count = spdk_strtol(optarg, 10);
		break;
	case 'f':
		g_fill_pattern = (uint8_t)spdk_strtol(optarg, 10);
		break;
	case 'T':
		g_threads_per_core = spdk_strtol(optarg, 10);
		break;
	case 'o':
		g_xfer_size_bytes = spdk_strtol(optarg, 10);
		break;
	case 'P':
		g_fail_percent_goal = spdk_strtol(optarg, 10);
		break;
	case 'q':
		g_queue_depth = spdk_strtol(optarg, 10);
		break;
	case 's':
		g_crc32c_seed = spdk_strtol(optarg, 10);
		break;
	case 't':
		g_time_in_sec = spdk_strtol(optarg, 10);
		break;
	case 'y':
		g_verify = true;
		break;
	case 'w':
		g_workload_type = optarg;
		if (!strcmp(g_workload_type, "copy")) {
			g_workload_selection = ACCEL_COPY;
		} else if (!strcmp(g_workload_type, "fill")) {
			g_workload_selection = ACCEL_FILL;
		} else if (!strcmp(g_workload_type, "crc32c")) {
			g_workload_selection = ACCEL_CRC32C;
		} else if (!strcmp(g_workload_type, "compare")) {
			g_workload_selection = ACCEL_COMPARE;
		} else if (!strcmp(g_workload_type, "dualcast")) {
			g_workload_selection = ACCEL_DUALCAST;
		}
		break;
	default:
		usage();
		return 1;
	}

	return 0;
}

static int dump_result(void);
static void
unregister_worker(void *arg1)
{
	struct worker_thread *worker = arg1;

	free(worker->task_base);
	free(worker->batch_base);
	spdk_put_io_channel(worker->ch);
	pthread_mutex_lock(&g_workers_lock);
	assert(g_num_workers >= 1);
	if (--g_num_workers == 0) {
		pthread_mutex_unlock(&g_workers_lock);
		g_rc = dump_result();
		spdk_app_stop(0);
	}
	pthread_mutex_unlock(&g_workers_lock);
}

static int
_get_task_data_bufs(struct ap_task *task)
{
	uint32_t align = 0;
	uint32_t i = 0;

	/* For dualcast, the DSA HW requires 4K alignment on destination addresses but
	 * we do this for all engines to keep it simple.
	 */
	if (g_workload_selection == ACCEL_DUALCAST) {
		align = ALIGN_4K;
	}

	if (g_workload_selection == ACCEL_CRC32C) {
		assert(g_crc32c_chained_count > 0);
		task->iov_cnt = g_crc32c_chained_count;
		task->iovs = calloc(task->iov_cnt, sizeof(struct iovec));
		if (!task->iovs) {
			fprintf(stderr, "cannot allocate task->iovs for task=%p\n", task);
			return -ENOMEM;
		}

		for (i = 0; i < task->iov_cnt; i++) {
			task->iovs[i].iov_base = spdk_dma_zmalloc(g_xfer_size_bytes, 0, NULL);
			if (task->iovs[i].iov_base == NULL) {
				return -ENOMEM;
			}
			memset(task->iovs[i].iov_base, DATA_PATTERN, g_xfer_size_bytes);
			task->iovs[i].iov_len = g_xfer_size_bytes;
		}

	} else {
		task->src = spdk_dma_zmalloc(g_xfer_size_bytes, 0, NULL);
		if (task->src == NULL) {
			fprintf(stderr, "Unable to alloc src buffer\n");
			return -ENOMEM;
		}

		/* For fill, set the entire src buffer to the fill pattern so that,
		 * when verify is enabled, dst can be compared against it.
		 */
		if (g_workload_selection == ACCEL_FILL) {
			memset(task->src, g_fill_pattern, g_xfer_size_bytes);
		} else {
			memset(task->src, DATA_PATTERN, g_xfer_size_bytes);
		}
	}

	task->dst = spdk_dma_zmalloc(g_xfer_size_bytes, align, NULL);
	if (task->dst == NULL) {
		fprintf(stderr, "Unable to alloc dst buffer\n");
		return -ENOMEM;
	}

	/* For compare we want the buffers to match, otherwise not. */
	if (g_workload_selection == ACCEL_COMPARE) {
		memset(task->dst, DATA_PATTERN, g_xfer_size_bytes);
	} else {
		memset(task->dst, ~DATA_PATTERN, g_xfer_size_bytes);
	}

	if (g_workload_selection == ACCEL_DUALCAST) {
		task->dst2 = spdk_dma_zmalloc(g_xfer_size_bytes, align, NULL);
		if (task->dst2 == NULL) {
			fprintf(stderr, "Unable to alloc dst2 buffer\n");
			return -ENOMEM;
		}
		memset(task->dst2, ~DATA_PATTERN, g_xfer_size_bytes);
	}

	return 0;
}

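/* Pop a preallocated task from the worker's pool and count it against the
 * worker's queue depth.
 */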
inline static struct ap_task *
_get_task(struct worker_thread *worker)
{
	struct ap_task *task;

	if (!TAILQ_EMPTY(&worker->tasks_pool)) {
		task = TAILQ_FIRST(&worker->tasks_pool);
		TAILQ_REMOVE(&worker->tasks_pool, task, link);
	} else {
		fprintf(stderr, "Unable to get ap_task\n");
		return NULL;
	}

	task->worker = worker;
	task->worker->current_queue_depth++;
	return task;
}

/* Submit one operation using the same ap task that just completed. */
static void
_submit_single(struct worker_thread *worker, struct ap_task *task)
{
	int random_num;
	int rc = 0;

	assert(worker);

	switch (g_workload_selection) {
	case ACCEL_COPY:
		rc = spdk_accel_submit_copy(worker->ch, task->dst, task->src,
					    g_xfer_size_bytes, accel_done, task);
		break;
	case ACCEL_FILL:
		/* For fill use the first byte of the task->src buffer. */
		rc = spdk_accel_submit_fill(worker->ch, task->dst, *(uint8_t *)task->src,
					    g_xfer_size_bytes, accel_done, task);
		break;
	case ACCEL_CRC32C:
		rc = spdk_accel_submit_crc32cv(worker->ch, (uint32_t *)task->dst,
					       task->iovs, task->iov_cnt, g_crc32c_seed,
					       accel_done, task);
		break;
	case ACCEL_COMPARE:
		random_num = rand() % 100;
		if (random_num < g_fail_percent_goal) {
			task->expected_status = -EILSEQ;
			*(uint8_t *)task->dst = ~DATA_PATTERN;
		} else {
			task->expected_status = 0;
			*(uint8_t *)task->dst = DATA_PATTERN;
		}
		rc = spdk_accel_submit_compare(worker->ch, task->dst, task->src,
					       g_xfer_size_bytes, accel_done, task);
		break;
	case ACCEL_DUALCAST:
		rc = spdk_accel_submit_dualcast(worker->ch, task->dst, task->dst2,
						task->src, g_xfer_size_bytes, accel_done, task);
		break;
	default:
		assert(false);
		break;

	}

	if (rc) {
		accel_done(task, rc);
	}
}

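/* Append one operation of the selected workload type to the batch being built. */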
static int
_batch_prep_cmd(struct worker_thread *worker, struct ap_task *task,
		struct accel_batch *worker_batch)
{
	struct spdk_accel_batch *batch = worker_batch->batch;
	int rc = 0;

	worker_batch->cmd_count++;
	assert(worker_batch->cmd_count <= g_ops_per_batch);

	switch (g_workload_selection) {
	case ACCEL_COPY:
		rc = spdk_accel_batch_prep_copy(worker->ch, batch, task->dst,
						task->src, g_xfer_size_bytes, accel_done, task);
		break;
	case ACCEL_DUALCAST:
		rc = spdk_accel_batch_prep_dualcast(worker->ch, batch, task->dst, task->dst2,
						    task->src, g_xfer_size_bytes, accel_done, task);
		break;
	case ACCEL_COMPARE:
		rc = spdk_accel_batch_prep_compare(worker->ch, batch, task->dst, task->src,
						   g_xfer_size_bytes, accel_done, task);
		break;
	case ACCEL_FILL:
		rc = spdk_accel_batch_prep_fill(worker->ch, batch, task->dst,
						*(uint8_t *)task->src,
						g_xfer_size_bytes, accel_done, task);
		break;
	case ACCEL_CRC32C:
		rc = spdk_accel_batch_prep_crc32cv(worker->ch, batch, (uint32_t *)task->dst,
						   task->iovs, task->iov_cnt, g_crc32c_seed, accel_done, task);
		break;
	default:
		assert(false);
		break;
	}

	return rc;
}

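/* Free the DMA buffers owned by a task (and its iovec array for crc32c workloads). */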
static void
_free_task_buffers(struct ap_task *task)
{
	uint32_t i;

	if (g_workload_selection == ACCEL_CRC32C) {
		if (task->iovs) {
			for (i = 0; i < task->iov_cnt; i++) {
				if (task->iovs[i].iov_base) {
					spdk_dma_free(task->iovs[i].iov_base);
				}
			}
			free(task->iovs);
		}
	} else {
		spdk_dma_free(task->src);
	}

	spdk_dma_free(task->dst);
	if (g_workload_selection == ACCEL_DUALCAST) {
		spdk_dma_free(task->dst2);
	}
}

static void _batch_done(void *cb_arg);
static void
_build_batch(struct worker_thread *worker, struct ap_task *task)
{
	struct accel_batch *worker_batch = NULL;
	int rc;

	assert(!TAILQ_EMPTY(&worker->in_prep_batches));

	worker_batch = TAILQ_FIRST(&worker->in_prep_batches);

	/* If an accel batch hasn't been created yet do so now. */
	if (worker_batch->batch == NULL) {
		worker_batch->batch = spdk_accel_batch_create(worker->ch);
		if (worker_batch->batch == NULL) {
			fprintf(stderr, "error unable to create new batch\n");
			return;
		}
	}

	/* Prep the command re-using the last completed command's task */
	rc = _batch_prep_cmd(worker, task, worker_batch);
	if (rc) {
		fprintf(stderr, "error prepping command for batch\n");
		goto error;
	}

	/* If this batch is full move it to the to_submit list so it gets
	 * submitted as batches complete.
	 */
	if (worker_batch->cmd_count == g_ops_per_batch) {
		TAILQ_REMOVE(&worker->in_prep_batches, worker_batch, link);
		TAILQ_INSERT_TAIL(&worker->to_submit_batches, worker_batch, link);
	}

	return;
error:
	spdk_accel_batch_cancel(worker->ch, worker_batch->batch);

}

static void batch_done(void *cb_arg, int status);
static void
_drain_batch(struct worker_thread *worker)
{
	struct accel_batch *worker_batch, *tmp;
	int rc;

	/* submit any batches that were being built up. */
	TAILQ_FOREACH_SAFE(worker_batch, &worker->in_prep_batches, link, tmp) {
		if (worker_batch->cmd_count == 0) {
			continue;
		}
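		/* Account for the queued commands plus one for the batch command itself. */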
		worker->current_queue_depth += worker_batch->cmd_count + 1;

		TAILQ_REMOVE(&worker->in_prep_batches, worker_batch, link);
		TAILQ_INSERT_TAIL(&worker->in_use_batches, worker_batch, link);
		rc = spdk_accel_batch_submit(worker->ch, worker_batch->batch, batch_done, worker_batch);
		if (rc == 0) {
			worker_batch->cmd_count = 0;
		} else {
			fprintf(stderr, "error sending final batch\n");
			worker->current_queue_depth -= worker_batch->cmd_count + 1;
			break;
		}
	}
}

static void
_batch_done(void *cb_arg)
{
	struct accel_batch *worker_batch = (struct accel_batch *)cb_arg;
	struct worker_thread *worker = worker_batch->worker;
	int rc;

	assert(TAILQ_EMPTY(&worker->in_use_batches) == 0);

	if (worker_batch->status) {
		SPDK_ERRLOG("error %d\n", worker_batch->status);
	}

	worker->current_queue_depth--;
	TAILQ_REMOVE(&worker->in_use_batches, worker_batch, link);
	TAILQ_INSERT_TAIL(&worker->in_prep_batches, worker_batch, link);
	worker_batch->batch = NULL;
	worker_batch->cmd_count = 0;

	if (!worker->is_draining) {
		worker_batch = TAILQ_FIRST(&worker->to_submit_batches);
		if (worker_batch != NULL) {

			assert(worker_batch->cmd_count == g_ops_per_batch);

			/* Add one for the batch command itself. */
			worker->current_queue_depth += g_ops_per_batch + 1;
			TAILQ_REMOVE(&worker->to_submit_batches, worker_batch, link);
			TAILQ_INSERT_TAIL(&worker->in_use_batches, worker_batch, link);

			rc = spdk_accel_batch_submit(worker->ch, worker_batch->batch, batch_done, worker_batch);
			if (rc) {
				fprintf(stderr, "error submitting batch\n");
				worker->current_queue_depth -= g_ops_per_batch + 1;
				return;
			}
		}
	} else {
		_drain_batch(worker);
	}
}

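/* Batch completion callback from the accel framework; record the status and
 * bounce processing to the owning worker's SPDK thread.
 */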
static void
batch_done(void *cb_arg, int status)
{
	struct accel_batch *worker_batch = (struct accel_batch *)cb_arg;

	assert(worker_batch->worker);

	worker_batch->status = status;
	spdk_thread_send_msg(worker_batch->worker->thread, _batch_done, worker_batch);
}

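/* Compute the reference CRC-32C over an iovec chain in software, for
 * comparison against the accel engine's result when verify is enabled.
 */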
static uint32_t
_update_crc32c_iov(struct iovec *iov, int iovcnt, uint32_t crc32c)
{
	int i;

	for (i = 0; i < iovcnt; i++) {
		assert(iov[i].iov_base != NULL);
		assert(iov[i].iov_len != 0);
		crc32c = spdk_crc32c_update(iov[i].iov_base, iov[i].iov_len, crc32c);
	}
	return crc32c;
}

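/* Runs on the worker's thread once an operation completes: verify the data if
 * requested, update statistics, then resubmit the task, fold it into the next
 * batch, or return it to the pool.
 */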
static void
_accel_done(void *arg1)
{
	struct ap_task *task = arg1;
	struct worker_thread *worker = task->worker;
	uint32_t sw_crc32c;

	assert(worker);
	assert(worker->current_queue_depth > 0);

	if (g_verify && task->status == 0) {
		switch (g_workload_selection) {
		case ACCEL_CRC32C:
			sw_crc32c = _update_crc32c_iov(task->iovs, task->iov_cnt, ~g_crc32c_seed);
			if (*(uint32_t *)task->dst != sw_crc32c) {
				SPDK_NOTICELOG("CRC-32C miscompare\n");
				worker->xfer_failed++;
			}
			break;
		case ACCEL_COPY:
			if (memcmp(task->src, task->dst, g_xfer_size_bytes)) {
				SPDK_NOTICELOG("Data miscompare\n");
				worker->xfer_failed++;
			}
			break;
		case ACCEL_DUALCAST:
			if (memcmp(task->src, task->dst, g_xfer_size_bytes)) {
				SPDK_NOTICELOG("Data miscompare, first destination\n");
				worker->xfer_failed++;
			}
			if (memcmp(task->src, task->dst2, g_xfer_size_bytes)) {
				SPDK_NOTICELOG("Data miscompare, second destination\n");
				worker->xfer_failed++;
			}
			break;
		case ACCEL_FILL:
			if (memcmp(task->dst, task->src, g_xfer_size_bytes)) {
				SPDK_NOTICELOG("Data miscompare\n");
				worker->xfer_failed++;
			}
			break;
		case ACCEL_COMPARE:
			break;
		default:
			assert(false);
			break;
		}
	}

	if (task->expected_status == -EILSEQ) {
		assert(task->status != 0);
		worker->injected_miscompares++;
	} else if (task->status) {
		/* Expected to pass but the accel engine reported an error (ex: COMPARE operation). */
		worker->xfer_failed++;
	}

	worker->xfer_completed++;
	worker->current_queue_depth--;

	if (!worker->is_draining) {
		if (g_ops_per_batch == 0) {
			_submit_single(worker, task);
			worker->current_queue_depth++;
		} else {
			_build_batch(worker, task);
		}
	} else if (g_ops_per_batch > 0) {
		_drain_batch(worker);
	} else {
		TAILQ_INSERT_TAIL(&worker->tasks_pool, task, link);
	}
}

static int
dump_result(void)
{
	uint64_t total_completed = 0;
	uint64_t total_failed = 0;
	uint64_t total_miscompared = 0;
	uint64_t total_xfer_per_sec, total_bw_in_MiBps;
	struct worker_thread *worker = g_workers;

	printf("\nCore,Thread   Transfers     Bandwidth     Failed     Miscompares\n");
	printf("------------------------------------------------------------------------\n");
	while (worker != NULL) {

		uint64_t xfer_per_sec = worker->xfer_completed / g_time_in_sec;
		uint64_t bw_in_MiBps = (worker->xfer_completed * g_xfer_size_bytes) /
				       (g_time_in_sec * 1024 * 1024);

		total_completed += worker->xfer_completed;
		total_failed += worker->xfer_failed;
		total_miscompared += worker->injected_miscompares;

		if (xfer_per_sec) {
			printf("%u,%u%17" PRIu64 "/s%9" PRIu64 " MiB/s%7" PRIu64 " %11" PRIu64 "\n",
			       worker->display.core, worker->display.thread, xfer_per_sec,
			       bw_in_MiBps, worker->xfer_failed, worker->injected_miscompares);
		}

		worker = worker->next;
	}

	total_xfer_per_sec = total_completed / g_time_in_sec;
	total_bw_in_MiBps = (total_completed * g_xfer_size_bytes) /
			    (g_time_in_sec * 1024 * 1024);

	printf("=========================================================================\n");
	printf("Total:%15" PRIu64 "/s%9" PRIu64 " MiB/s%6" PRIu64 " %11" PRIu64"\n\n",
	       total_xfer_per_sec, total_bw_in_MiBps, total_failed, total_miscompared);

	return total_failed ? 1 : 0;
}

static inline void
_free_task_buffers_in_pool(struct worker_thread *worker)
{
	struct ap_task *task;

	assert(worker);
	while ((task = TAILQ_FIRST(&worker->tasks_pool))) {
		TAILQ_REMOVE(&worker->tasks_pool, task, link);
		_free_task_buffers(task);
	}
}

static int
_check_draining(void *arg)
{
	struct worker_thread *worker = arg;

	assert(worker);

	if (worker->current_queue_depth == 0) {
		_free_task_buffers_in_pool(worker);
		spdk_poller_unregister(&worker->is_draining_poller);
		unregister_worker(worker);
	}

	return -1;
}

static int
_worker_stop(void *arg)
{
	struct worker_thread *worker = arg;

	assert(worker);

	spdk_poller_unregister(&worker->stop_poller);

	/* now let the worker drain and check its outstanding IO with a poller */
	worker->is_draining = true;
	worker->is_draining_poller = SPDK_POLLER_REGISTER(_check_draining, worker, 0);

	return 0;
}

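/* Per-worker initialization, run on each worker's SPDK thread: allocate the
 * task pool (and batch contexts when batching is enabled), then prime the
 * engine up to the configured queue depth.
 */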
static void
_init_thread(void *arg1)
{
	struct worker_thread *worker;
	struct ap_task *task;
	int i, rc, num_batches;
	int max_per_batch;
	int remaining = g_queue_depth;
	int num_tasks = g_queue_depth;
	struct accel_batch *tmp;
	struct accel_batch *worker_batch = NULL;
	struct display_info *display = arg1;

	worker = calloc(1, sizeof(*worker));
	if (worker == NULL) {
		fprintf(stderr, "Unable to allocate worker\n");
		free(display);
		return;
	}

	worker->display.core = display->core;
	worker->display.thread = display->thread;
	free(display);
	worker->core = spdk_env_get_current_core();
	worker->thread = spdk_get_thread();
	pthread_mutex_lock(&g_workers_lock);
	g_num_workers++;
	worker->next = g_workers;
	g_workers = worker;
	pthread_mutex_unlock(&g_workers_lock);
	worker->ch = spdk_accel_engine_get_io_channel();

	TAILQ_INIT(&worker->tasks_pool);

	if (g_ops_per_batch > 0) {

		max_per_batch = spdk_accel_batch_get_max(worker->ch);
		assert(max_per_batch > 0);

		if (g_ops_per_batch > max_per_batch) {
			fprintf(stderr, "Reducing requested batch amount to max supported of %d\n", max_per_batch);
			g_ops_per_batch = max_per_batch;
		}

		if (g_ops_per_batch > g_queue_depth) {
			fprintf(stderr, "Batch amount > queue depth, resetting to %d\n", g_queue_depth);
			g_ops_per_batch = g_queue_depth;
		}

		TAILQ_INIT(&worker->in_prep_batches);
		TAILQ_INIT(&worker->to_submit_batches);
		TAILQ_INIT(&worker->in_use_batches);

		/* A worker_batch will live on one of 3 lists:
		 * IN_PREP: as individual IOs complete, new ones are built up on a
		 *          worker_batch on this list until it reaches g_ops_per_batch.
		 * TO_SUBMIT: as batches are built up on IO completion they are moved
		 *            to this list once they are full.  This list is used in
		 *            batch completion to start new batches.
		 * IN_USE: the worker_batch is outstanding and will be moved to the
		 *         in prep list when the batch is completed.
		 *
		 * So we need enough to cover queue depth loading, then one to replace
		 * each one of those, and when everything is outstanding there needs
		 * to be one extra batch to build up while the last batch is completing
		 * IO but has not yet completed the batch command itself.
		 */
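		/* Example: with the default queue depth of 32 and -b 8, this allocates
		 * (32 / 8) * 2 + 1 = 9 batch contexts for the worker.
		 */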
		num_batches = (g_queue_depth / g_ops_per_batch * 2) + 1;
		worker->batch_base = calloc(num_batches, sizeof(struct accel_batch));
		if (worker->batch_base == NULL) {
			fprintf(stderr, "Could not allocate batch base.\n");
			goto error;
		}
		worker_batch = worker->batch_base;
		for (i = 0; i < num_batches; i++) {
			worker_batch->worker = worker;
			TAILQ_INSERT_TAIL(&worker->in_prep_batches, worker_batch, link);
			worker_batch++;
		}
	}

	worker->task_base = calloc(num_tasks, sizeof(struct ap_task));
	if (worker->task_base == NULL) {
		fprintf(stderr, "Could not allocate task base.\n");
		goto error;
	}

	task = worker->task_base;
	for (i = 0; i < num_tasks; i++) {
		TAILQ_INSERT_TAIL(&worker->tasks_pool, task, link);
		if (_get_task_data_bufs(task)) {
			fprintf(stderr, "Unable to get data bufs\n");
			goto error;
		}
		task++;
	}

	/* Register a poller that will stop the worker at time elapsed */
	worker->stop_poller = SPDK_POLLER_REGISTER(_worker_stop, worker,
			      g_time_in_sec * 1000000ULL);

	/* If batching is enabled, load up to the full queue depth before
	 * processing any completions, then ping pong between two batches,
	 * one processing and one being built up for when the other completes.
	 */
	if (g_ops_per_batch > 0) {
		do {
			worker_batch = TAILQ_FIRST(&worker->in_prep_batches);
			if (worker_batch == NULL) {
				goto error;
			}

			worker_batch->batch = spdk_accel_batch_create(worker->ch);
			if (worker_batch->batch == NULL) {
				raise(SIGINT);
				break;
			}

			for (i = 0; i < g_ops_per_batch; i++) {
				task = _get_task(worker);
				if (task == NULL) {
					goto error;
				}

				rc = _batch_prep_cmd(worker, task, worker_batch);
				if (rc) {
					fprintf(stderr, "error prepping command\n");
					goto error;
				}
			}

			/* for the batch operation itself. */
			task->worker->current_queue_depth++;
			TAILQ_REMOVE(&worker->in_prep_batches, worker_batch, link);
			TAILQ_INSERT_TAIL(&worker->in_use_batches, worker_batch, link);

			rc = spdk_accel_batch_submit(worker->ch, worker_batch->batch, batch_done, worker_batch);
			if (rc) {
				fprintf(stderr, "error submitting batch\n");
				goto error;
			}
			assert(remaining >= g_ops_per_batch);
			remaining -= g_ops_per_batch;
		} while (remaining > 0);
	}

	/* Submit as singles when no batching is enabled or we ran out of batches. */
	for (i = 0; i < remaining; i++) {
		task = _get_task(worker);
		if (task == NULL) {
			goto error;
		}

		_submit_single(worker, task);
	}
	return;
error:
	if (worker_batch && worker_batch->batch) {
		TAILQ_FOREACH_SAFE(worker_batch, &worker->in_use_batches, link, tmp) {
			spdk_accel_batch_cancel(worker->ch, worker_batch->batch);
			TAILQ_REMOVE(&worker->in_use_batches, worker_batch, link);
		}
	}

	_free_task_buffers_in_pool(worker);
	free(worker->batch_base);
	free(worker->task_base);
	free(worker);
	spdk_app_stop(-1);
}

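/* Per-operation completion callback from the accel framework; record the
 * status and forward processing to the owning worker's thread.
 */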
static void
accel_done(void *cb_arg, int status)
{
	struct ap_task *task = (struct ap_task *)cb_arg;
	struct worker_thread *worker = task->worker;

	assert(worker);

	task->status = status;
	spdk_thread_send_msg(worker->thread, _accel_done, task);
}

static void
accel_perf_start(void *arg1)
{
	struct spdk_io_channel *accel_ch;
	struct spdk_cpuset tmp_cpumask = {};
	char thread_name[32];
	uint32_t i;
	int j;
	struct spdk_thread *thread;
	struct display_info *display;

	accel_ch = spdk_accel_engine_get_io_channel();
	g_capabilites = spdk_accel_get_capabilities(accel_ch);
	spdk_put_io_channel(accel_ch);

	if ((g_capabilites & g_workload_selection) != g_workload_selection) {
		SPDK_WARNLOG("The selected workload is not natively supported by the current engine\n");
		SPDK_WARNLOG("The software engine will be used instead.\n\n");
	}

	g_tsc_rate = spdk_get_ticks_hz();
	g_tsc_us_rate = g_tsc_rate / (1000 * 1000);
	g_tsc_end = spdk_get_ticks() + g_time_in_sec * g_tsc_rate;

	printf("Running for %d seconds...\n", g_time_in_sec);
	fflush(stdout);

	/* Create worker threads for each core that was specified. */
	SPDK_ENV_FOREACH_CORE(i) {
		for (j = 0; j < g_threads_per_core; j++) {
			snprintf(thread_name, sizeof(thread_name), "ap_worker_%u_%u", i, j);
			spdk_cpuset_zero(&tmp_cpumask);
			spdk_cpuset_set_cpu(&tmp_cpumask, i, true);
			thread = spdk_thread_create(thread_name, &tmp_cpumask);
			display = calloc(1, sizeof(*display));
			if (display == NULL) {
				fprintf(stderr, "Unable to allocate memory\n");
				spdk_app_stop(-1);
				return;
			}
			display->core = i;
			display->thread = j;
			spdk_thread_send_msg(thread, _init_thread, display);
		}
	}
}

int
main(int argc, char **argv)
{
	struct spdk_app_opts opts = {};
	struct worker_thread *worker, *tmp;

	pthread_mutex_init(&g_workers_lock, NULL);
	spdk_app_opts_init(&opts, sizeof(opts));
	opts.reactor_mask = "0x1";
	if (spdk_app_parse_args(argc, argv, &opts, "C:o:q:t:yw:P:f:b:T:", NULL, parse_args,
				usage) != SPDK_APP_PARSE_ARGS_SUCCESS) {
		g_rc = -1;
		goto cleanup;
	}

	if ((g_workload_selection != ACCEL_COPY) &&
	    (g_workload_selection != ACCEL_FILL) &&
	    (g_workload_selection != ACCEL_CRC32C) &&
	    (g_workload_selection != ACCEL_COMPARE) &&
	    (g_workload_selection != ACCEL_DUALCAST)) {
		usage();
		g_rc = -1;
		goto cleanup;
	}

	if (g_ops_per_batch > 0 && (g_queue_depth % g_ops_per_batch > 0)) {
		fprintf(stdout, "queue depth must be a multiple of batch size\n");
		usage();
		g_rc = -1;
		goto cleanup;
	}

	if (g_workload_selection == ACCEL_CRC32C &&
	    g_crc32c_chained_count == 0) {
		usage();
		g_rc = -1;
		goto cleanup;
	}

	dump_user_config(&opts);
	g_rc = spdk_app_start(&opts, accel_perf_start, NULL);
	if (g_rc) {
		SPDK_ERRLOG("ERROR starting application\n");
	}

	pthread_mutex_destroy(&g_workers_lock);

	worker = g_workers;
	while (worker) {
		tmp = worker->next;
		free(worker);
		worker = tmp;
	}
cleanup:
	spdk_app_fini();
	return g_rc;
}