xref: /spdk/lib/accel/accel.c (revision b3bec07939ebe2ea2e0c43931705d32aa9e06719)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2020 Intel Corporation.
3  *   Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
4  *   All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk/accel_module.h"
10 
11 #include "accel_internal.h"
12 
13 #include "spdk/dma.h"
14 #include "spdk/env.h"
15 #include "spdk/likely.h"
16 #include "spdk/log.h"
17 #include "spdk/thread.h"
18 #include "spdk/json.h"
19 #include "spdk/crc32.h"
20 #include "spdk/util.h"
21 #include "spdk/hexlify.h"
22 
23 /* Accelerator Framework: The following provides a top level
24  * generic API for the accelerator functions defined here. Modules,
25  * such as the one in /module/accel/ioat, supply the implementation
26  * with the exception of the pure software implementation contained
27  * later in this file.
28  */
29 
/* Alignment required for dualcast destination buffers (see spdk_accel_submit_dualcast) */
#define ALIGN_4K			0x1000
/* Default per-channel pool sizes for tasks, sequences and buffers */
#define MAX_TASKS_PER_CHANNEL		0x800
#define ACCEL_SMALL_CACHE_SIZE		128
#define ACCEL_LARGE_CACHE_SIZE		16
/* Set MSB, so we don't return NULL pointers as buffers */
#define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
#define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)

#define ACCEL_CRYPTO_TWEAK_MODE_DEFAULT	SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA
39 
/* A registered module plus framework-level info cached about it */
struct accel_module {
	struct spdk_accel_module_if	*module;
	/* True if the module can operate on data in memory domains */
	bool				supports_memory_domains;
};
44 
/* Largest context size for all accel modules */
static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);

static struct spdk_accel_module_if *g_accel_module = NULL;
/* Callback and argument for framework finalization */
static spdk_accel_fini_cb g_fini_cb_fn = NULL;
static void *g_fini_cb_arg = NULL;
/* Once true, opcode->module assignments can no longer be overridden */
static bool g_modules_started = false;
static struct spdk_memory_domain *g_accel_domain;

/* Global list of registered accelerator modules */
static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);

/* Crypto keyring */
static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
static struct spdk_spinlock g_keyring_spin;

/* Global array mapping capabilities to modules */
static struct accel_module g_modules_opc[ACCEL_OPC_LAST] = {};
/* User-requested module name per opcode, set via spdk_accel_assign_opc() */
static char *g_modules_opc_override[ACCEL_OPC_LAST] = {};
TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
static struct spdk_accel_driver *g_accel_driver;
/* Framework tunables; defaults come from the constants above */
static struct spdk_accel_opts g_opts = {
	.small_cache_size = ACCEL_SMALL_CACHE_SIZE,
	.large_cache_size = ACCEL_LARGE_CACHE_SIZE,
	.task_count = MAX_TASKS_PER_CHANNEL,
	.sequence_count = MAX_TASKS_PER_CHANNEL,
	.buf_count = MAX_TASKS_PER_CHANNEL,
};
/* Framework-wide stats; presumably protected by g_stats_lock — confirm at use sites */
static struct accel_stats g_stats;
static struct spdk_spinlock g_stats_lock;

/* Human-readable operation names, indexed by enum accel_opcode */
static const char *g_opcode_strings[ACCEL_OPC_LAST] = {
	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
	"compress", "decompress", "encrypt", "decrypt", "xor"
};
81 
/* States of the sequence-processing state machine.  "CHECK_*" states decide
 * whether extra work (buffer allocation, data movement) is needed; "AWAIT_*"
 * states park the sequence until an asynchronous step completes.
 */
enum accel_sequence_state {
	ACCEL_SEQUENCE_STATE_INIT,
	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_PULL_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
	ACCEL_SEQUENCE_STATE_EXEC_TASK,
	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
	ACCEL_SEQUENCE_STATE_NEXT_TASK,
	ACCEL_SEQUENCE_STATE_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_DRIVER_EXEC,
	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE,
	ACCEL_SEQUENCE_STATE_ERROR,
	ACCEL_SEQUENCE_STATE_MAX,
};

/* Debug-log names indexed by enum accel_sequence_state; unused in release builds */
static const char *g_seq_states[]
__attribute__((unused)) = {
	[ACCEL_SEQUENCE_STATE_INIT] = "init",
	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC] = "driver-exec",
	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK] = "driver-await-task",
	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE] = "driver-complete",
	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
	[ACCEL_SEQUENCE_STATE_MAX] = "",
};

/* Bounds-checked state-name lookup for logging */
#define ACCEL_SEQUENCE_STATE_STRING(s) \
	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
	 ? g_seq_states[s] : "unknown")
128 
/* A framework-allocated buffer; backing memory (buf) is obtained lazily from iobuf */
struct accel_buffer {
	struct spdk_accel_sequence	*seq;	/* owning sequence, if any */
	void				*buf;	/* backing memory; NULL until allocated */
	uint64_t			len;
	struct spdk_iobuf_entry		iobuf;
	spdk_accel_sequence_get_buf_cb	cb_fn;	/* invoked when backing memory becomes available */
	void				*cb_ctx;
	TAILQ_ENTRY(accel_buffer)	link;
};

/* Per-thread channel context for the accel framework */
struct accel_io_channel {
	struct spdk_io_channel			*module_ch[ACCEL_OPC_LAST];	/* per-opcode module channels */
	struct spdk_io_channel			*driver_channel;
	void					*task_pool_base;
	struct spdk_accel_sequence		*seq_pool_base;
	struct accel_buffer			*buf_pool_base;
	TAILQ_HEAD(, spdk_accel_task)		task_pool;	/* free tasks */
	TAILQ_HEAD(, spdk_accel_sequence)	seq_pool;	/* free sequences */
	TAILQ_HEAD(, accel_buffer)		buf_pool;	/* free accel buffers */
	struct spdk_iobuf_channel		iobuf;
	struct accel_stats			stats;
};

TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);

/* An ordered chain of accel operations executed as a unit */
struct spdk_accel_sequence {
	struct accel_io_channel			*ch;
	struct accel_sequence_tasks		tasks;		/* tasks not yet executed */
	struct accel_sequence_tasks		completed;	/* tasks already finished */
	TAILQ_HEAD(, accel_buffer)		bounce_bufs;	/* returned to the pool on put */
	int					status;
	/* state uses enum accel_sequence_state */
	uint8_t					state;
	bool					in_process_sequence;
	spdk_accel_completion_cb		cb_fn;
	void					*cb_arg;
	TAILQ_ENTRY(spdk_accel_sequence)	link;
};
167 
/* Bump one per-channel statistic counter by v */
#define accel_update_stats(ch, event, v) \
	do { \
		(ch)->stats.event += (v); \
	} while (0)

/* Bump a per-opcode statistic for the given task's opcode */
#define accel_update_task_stats(ch, task, event, v) \
	accel_update_stats(ch, operations[(task)->op_code].event, v)
175 
/* Transition a sequence's state machine, logging the transition for debug. */
static inline void
accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
{
	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
	/* ERROR is terminal: the only valid transition out of it is to ERROR itself */
	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
	seq->state = state;
}
184 
185 static void
186 accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
187 {
188 	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
189 	assert(status != 0);
190 	seq->status = status;
191 }
192 
193 int
194 spdk_accel_get_opc_module_name(enum accel_opcode opcode, const char **module_name)
195 {
196 	if (opcode >= ACCEL_OPC_LAST) {
197 		/* invalid opcode */
198 		return -EINVAL;
199 	}
200 
201 	if (g_modules_opc[opcode].module) {
202 		*module_name = g_modules_opc[opcode].module->name;
203 	} else {
204 		return -ENOENT;
205 	}
206 
207 	return 0;
208 }
209 
210 void
211 _accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
212 {
213 	struct spdk_accel_module_if *accel_module;
214 	enum accel_opcode opcode;
215 	int j = 0;
216 
217 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
218 		for (opcode = 0; opcode < ACCEL_OPC_LAST; opcode++) {
219 			if (accel_module->supports_opcode(opcode)) {
220 				info->ops[j] = opcode;
221 				j++;
222 			}
223 		}
224 		info->name = accel_module->name;
225 		info->num_ops = j;
226 		fn(info);
227 		j = 0;
228 	}
229 }
230 
231 int
232 _accel_get_opc_name(enum accel_opcode opcode, const char **opcode_name)
233 {
234 	int rc = 0;
235 
236 	if (opcode < ACCEL_OPC_LAST) {
237 		*opcode_name = g_opcode_strings[opcode];
238 	} else {
239 		/* invalid opcode */
240 		rc = -EINVAL;
241 	}
242 
243 	return rc;
244 }
245 
246 int
247 spdk_accel_assign_opc(enum accel_opcode opcode, const char *name)
248 {
249 	char *copy;
250 
251 	if (g_modules_started == true) {
252 		/* we don't allow re-assignment once things have started */
253 		return -EINVAL;
254 	}
255 
256 	if (opcode >= ACCEL_OPC_LAST) {
257 		/* invalid opcode */
258 		return -EINVAL;
259 	}
260 
261 	copy = strdup(name);
262 	if (copy == NULL) {
263 		return -ENOMEM;
264 	}
265 
266 	/* module selection will be validated after the framework starts. */
267 	g_modules_opc_override[opcode] = copy;
268 
269 	return 0;
270 }
271 
/* Complete a task: return it to the channel's free pool, update per-opcode
 * statistics and invoke the user's completion callback with status.
 */
void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
	struct accel_io_channel *accel_ch = accel_task->accel_ch;
	/* Capture the callback before recycling: the task may be re-allocated
	 * and overwritten from inside cb_fn.
	 */
	spdk_accel_completion_cb	cb_fn = accel_task->cb_fn;
	void				*cb_arg = accel_task->cb_arg;

	/* We should put the accel_task into the list firstly in order to avoid
	 * the accel task list is exhausted when there is recursive call to
	 * allocate accel_task in user's call back function (cb_fn)
	 */
	TAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link);
	accel_task->seq = NULL;

	accel_update_task_stats(accel_ch, accel_task, executed, 1);
	accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes);
	if (spdk_unlikely(status != 0)) {
		accel_update_task_stats(accel_ch, accel_task, failed, 1);
	}

	cb_fn(cb_arg, status);
}
294 
/* Pop a free task from the channel pool and initialize the fields common to
 * all operations.  Returns NULL when the pool is exhausted; callers report
 * that as -ENOMEM.
 */
inline static struct spdk_accel_task *
_get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *accel_task;

	accel_task = TAILQ_FIRST(&accel_ch->task_pool);
	if (spdk_unlikely(accel_task == NULL)) {
		return NULL;
	}

	TAILQ_REMOVE(&accel_ch->task_pool, accel_task, link);
	/* Clear the list linkage so stale pointers can't be mistaken for membership */
	accel_task->link.tqe_next = NULL;
	accel_task->link.tqe_prev = NULL;

	accel_task->cb_fn = cb_fn;
	accel_task->cb_arg = cb_arg;
	accel_task->accel_ch = accel_ch;
	/* iov pointers are reset so each submit path must set its own */
	accel_task->s.iovs = NULL;
	accel_task->d.iovs = NULL;

	return accel_task;
}
317 
/* Hand a fully-initialized task to the module assigned to its opcode,
 * counting submission failures in the channel stats.  Returns the module's
 * submit status (0 on success).
 */
static inline int
accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
{
	struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
	struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
	int rc;

	rc = module->submit_tasks(module_ch, task);
	if (spdk_unlikely(rc != 0)) {
		accel_update_task_stats(accel_ch, task, failed, 1);
	}

	return rc;
}
332 
333 static inline uint64_t
334 accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
335 {
336 	uint64_t result = 0;
337 	uint32_t i;
338 
339 	for (i = 0; i < iovcnt; ++i) {
340 		result += iovs[i].iov_len;
341 	}
342 
343 	return result;
344 }
345 
346 /* Accel framework public API for copy function */
347 int
348 spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
349 		       uint64_t nbytes, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
350 {
351 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
352 	struct spdk_accel_task *accel_task;
353 
354 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
355 	if (spdk_unlikely(accel_task == NULL)) {
356 		return -ENOMEM;
357 	}
358 
359 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
360 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
361 	accel_task->d.iovs[0].iov_base = dst;
362 	accel_task->d.iovs[0].iov_len = nbytes;
363 	accel_task->d.iovcnt = 1;
364 	accel_task->s.iovs[0].iov_base = src;
365 	accel_task->s.iovs[0].iov_len = nbytes;
366 	accel_task->s.iovcnt = 1;
367 	accel_task->nbytes = nbytes;
368 	accel_task->op_code = ACCEL_OPC_COPY;
369 	accel_task->flags = flags;
370 	accel_task->src_domain = NULL;
371 	accel_task->dst_domain = NULL;
372 	accel_task->step_cb_fn = NULL;
373 
374 	return accel_submit_task(accel_ch, accel_task);
375 }
376 
/* Accel framework public API for dual cast copy function.
 * Copies nbytes from src to both dst1 and dst2; both destinations must be
 * 4K-aligned.  Returns -EINVAL on misalignment, -ENOMEM when the task pool
 * is exhausted.
 */
int
spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
			   void *dst2, void *src, uint64_t nbytes, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	/* One single-element iovec per buffer, backed by the task's aux storage */
	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST2];
	accel_task->d.iovs[0].iov_base = dst1;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->d2.iovs[0].iov_base = dst2;
	accel_task->d2.iovs[0].iov_len = nbytes;
	accel_task->d2.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_DUALCAST;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
417 
418 /* Accel framework public API for compare function */
419 int
420 spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
421 			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
422 			  void *cb_arg)
423 {
424 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
425 	struct spdk_accel_task *accel_task;
426 
427 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
428 	if (spdk_unlikely(accel_task == NULL)) {
429 		return -ENOMEM;
430 	}
431 
432 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
433 	accel_task->s2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC2];
434 	accel_task->s.iovs[0].iov_base = src1;
435 	accel_task->s.iovs[0].iov_len = nbytes;
436 	accel_task->s.iovcnt = 1;
437 	accel_task->s2.iovs[0].iov_base = src2;
438 	accel_task->s2.iovs[0].iov_len = nbytes;
439 	accel_task->s2.iovcnt = 1;
440 	accel_task->nbytes = nbytes;
441 	accel_task->op_code = ACCEL_OPC_COMPARE;
442 	accel_task->src_domain = NULL;
443 	accel_task->dst_domain = NULL;
444 	accel_task->step_cb_fn = NULL;
445 
446 	return accel_submit_task(accel_ch, accel_task);
447 }
448 
449 /* Accel framework public API for fill function */
450 int
451 spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
452 		       uint8_t fill, uint64_t nbytes, int flags,
453 		       spdk_accel_completion_cb cb_fn, void *cb_arg)
454 {
455 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
456 	struct spdk_accel_task *accel_task;
457 
458 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
459 	if (spdk_unlikely(accel_task == NULL)) {
460 		return -ENOMEM;
461 	}
462 
463 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
464 	accel_task->d.iovs[0].iov_base = dst;
465 	accel_task->d.iovs[0].iov_len = nbytes;
466 	accel_task->d.iovcnt = 1;
467 	accel_task->nbytes = nbytes;
468 	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
469 	accel_task->flags = flags;
470 	accel_task->op_code = ACCEL_OPC_FILL;
471 	accel_task->src_domain = NULL;
472 	accel_task->dst_domain = NULL;
473 	accel_task->step_cb_fn = NULL;
474 
475 	return accel_submit_task(accel_ch, accel_task);
476 }
477 
/* Accel framework public API for CRC-32C function.
 * Computes the CRC-32C of nbytes at src (starting from seed) into *crc_dst.
 * Returns -ENOMEM when the task pool is exhausted.
 */
int
spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			 void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
506 
507 /* Accel framework public API for chained CRC-32C function */
508 int
509 spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
510 			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
511 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
512 {
513 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
514 	struct spdk_accel_task *accel_task;
515 
516 	if (iov == NULL) {
517 		SPDK_ERRLOG("iov should not be NULL");
518 		return -EINVAL;
519 	}
520 
521 	if (!iov_cnt) {
522 		SPDK_ERRLOG("iovcnt should not be zero value\n");
523 		return -EINVAL;
524 	}
525 
526 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
527 	if (spdk_unlikely(accel_task == NULL)) {
528 		SPDK_ERRLOG("no memory\n");
529 		assert(0);
530 		return -ENOMEM;
531 	}
532 
533 	accel_task->s.iovs = iov;
534 	accel_task->s.iovcnt = iov_cnt;
535 	accel_task->nbytes = accel_get_iovlen(iov, iov_cnt);
536 	accel_task->crc_dst = crc_dst;
537 	accel_task->seed = seed;
538 	accel_task->op_code = ACCEL_OPC_CRC32C;
539 	accel_task->src_domain = NULL;
540 	accel_task->dst_domain = NULL;
541 	accel_task->step_cb_fn = NULL;
542 
543 	return accel_submit_task(accel_ch, accel_task);
544 }
545 
/* Accel framework public API for copy with CRC-32C function.
 * Copies nbytes from src to dst while computing the CRC-32C of the data
 * (starting from seed) into *crc_dst.  Returns -ENOMEM when the task pool
 * is exhausted.
 */
int
spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
			      int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
579 
580 /* Accel framework public API for chained copy + CRC-32C function */
581 int
582 spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
583 			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
584 			       uint32_t seed, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
585 {
586 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
587 	struct spdk_accel_task *accel_task;
588 	uint64_t nbytes;
589 
590 	if (src_iovs == NULL) {
591 		SPDK_ERRLOG("iov should not be NULL");
592 		return -EINVAL;
593 	}
594 
595 	if (!iov_cnt) {
596 		SPDK_ERRLOG("iovcnt should not be zero value\n");
597 		return -EINVAL;
598 	}
599 
600 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
601 	if (spdk_unlikely(accel_task == NULL)) {
602 		SPDK_ERRLOG("no memory\n");
603 		assert(0);
604 		return -ENOMEM;
605 	}
606 
607 	nbytes = accel_get_iovlen(src_iovs, iov_cnt);
608 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
609 	accel_task->d.iovs[0].iov_base = dst;
610 	accel_task->d.iovs[0].iov_len = nbytes;
611 	accel_task->d.iovcnt = 1;
612 	accel_task->s.iovs = src_iovs;
613 	accel_task->s.iovcnt = iov_cnt;
614 	accel_task->nbytes = nbytes;
615 	accel_task->crc_dst = crc_dst;
616 	accel_task->seed = seed;
617 	accel_task->flags = flags;
618 	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;
619 	accel_task->src_domain = NULL;
620 	accel_task->dst_domain = NULL;
621 	accel_task->step_cb_fn = NULL;
622 
623 	return accel_submit_task(accel_ch, accel_task);
624 }
625 
/* Compress the data in src_iovs into the single buffer dst (of size nbytes);
 * the compressed length is stored in *output_size on completion.
 * Returns -ENOMEM when the task pool is exhausted.
 */
int
spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	/* NOTE(review): nbytes here is the destination buffer size, unlike the
	 * decompress path where it is the source length — confirm modules and
	 * the num_bytes stat expect that.
	 */
	accel_task->nbytes = nbytes;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_COMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
655 
/* Decompress the data in src_iovs into dst_iovs; the decompressed length is
 * stored in *output_size on completion.  Returns -ENOMEM when the task pool
 * is exhausted.
 */
int
spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
			     uint32_t *output_size, int flags, spdk_accel_completion_cb cb_fn,
			     void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	/* nbytes tracks the (compressed) source length for this opcode */
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_DECOMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
684 
/* Encrypt src_iovs into dst_iovs using key.
 * iv is the initial vector for the first block; block_size is the crypto
 * block granularity (presumably the per-block tweak advances with it —
 * confirm against the module implementations).  Returns -EINVAL for missing
 * arguments and -ENOMEM when the task pool is exhausted.
 */
int
spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_ENCRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
720 
/* Decrypt src_iovs into dst_iovs using key; mirror image of
 * spdk_accel_submit_encrypt() with the same iv/block_size semantics.
 * Returns -EINVAL for missing arguments and -ENOMEM when the task pool is
 * exhausted.
 */
int
spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_DECRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
756 
757 int
758 spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
759 		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
760 {
761 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
762 	struct spdk_accel_task *accel_task;
763 
764 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
765 	if (spdk_unlikely(accel_task == NULL)) {
766 		return -ENOMEM;
767 	}
768 
769 	accel_task->nsrcs.srcs = sources;
770 	accel_task->nsrcs.cnt = nsrcs;
771 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
772 	accel_task->d.iovs[0].iov_base = dst;
773 	accel_task->d.iovs[0].iov_len = nbytes;
774 	accel_task->d.iovcnt = 1;
775 	accel_task->nbytes = nbytes;
776 	accel_task->op_code = ACCEL_OPC_XOR;
777 	accel_task->src_domain = NULL;
778 	accel_task->dst_domain = NULL;
779 	accel_task->step_cb_fn = NULL;
780 
781 	return accel_submit_task(accel_ch, accel_task);
782 }
783 
784 static inline struct accel_buffer *
785 accel_get_buf(struct accel_io_channel *ch, uint64_t len)
786 {
787 	struct accel_buffer *buf;
788 
789 	buf = TAILQ_FIRST(&ch->buf_pool);
790 	if (spdk_unlikely(buf == NULL)) {
791 		return NULL;
792 	}
793 
794 	TAILQ_REMOVE(&ch->buf_pool, buf, link);
795 	buf->len = len;
796 	buf->buf = NULL;
797 	buf->seq = NULL;
798 	buf->cb_fn = NULL;
799 
800 	return buf;
801 }
802 
/* Return an accel_buffer to the channel pool, releasing its lazily-allocated
 * iobuf backing memory first, if any.
 */
static inline void
accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
{
	if (buf->buf != NULL) {
		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
	}

	TAILQ_INSERT_HEAD(&ch->buf_pool, buf, link);
}
812 
/* Pop a free sequence from the channel pool and reset it to the INIT state
 * with empty task/completed/bounce-buffer lists.  Returns NULL when the
 * pool is exhausted.
 */
static inline struct spdk_accel_sequence *
accel_sequence_get(struct accel_io_channel *ch)
{
	struct spdk_accel_sequence *seq;

	seq = TAILQ_FIRST(&ch->seq_pool);
	if (spdk_unlikely(seq == NULL)) {
		return NULL;
	}

	TAILQ_REMOVE(&ch->seq_pool, seq, link);

	TAILQ_INIT(&seq->tasks);
	TAILQ_INIT(&seq->completed);
	TAILQ_INIT(&seq->bounce_bufs);

	seq->ch = ch;
	seq->status = 0;
	seq->state = ACCEL_SEQUENCE_STATE_INIT;
	seq->in_process_sequence = false;

	return seq;
}
836 
/* Return a sequence to the channel pool.  Any bounce buffers still attached
 * are released first; the task lists must already be empty.
 */
static inline void
accel_sequence_put(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *ch = seq->ch;
	struct accel_buffer *buf;

	while (!TAILQ_EMPTY(&seq->bounce_bufs)) {
		buf = TAILQ_FIRST(&seq->bounce_bufs);
		TAILQ_REMOVE(&seq->bounce_bufs, buf, link);
		accel_put_buf(seq->ch, buf);
	}

	/* All tasks must have been completed (or never attached) by now */
	assert(TAILQ_EMPTY(&seq->tasks));
	assert(TAILQ_EMPTY(&seq->completed));
	seq->ch = NULL;

	TAILQ_INSERT_HEAD(&ch->seq_pool, seq, link);
}
855 
856 static void accel_sequence_task_cb(void *cb_arg, int status);
857 
/* Allocate a task destined for a sequence: its completion callback is the
 * internal sequence handler, while the user's per-step callback (cb_fn) is
 * stored separately.  Returns NULL when the task pool is exhausted.
 */
static inline struct spdk_accel_task *
accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
			spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *task;

	task = _get_task(ch, accel_sequence_task_cb, seq);
	if (spdk_unlikely(task == NULL)) {
		return task;
	}

	task->step_cb_fn = cb_fn;
	task->step_cb_arg = cb_arg;
	task->seq = seq;

	return task;
}
875 
/* Append a copy operation to the sequence at *pseq, allocating a new
 * sequence first if *pseq is NULL.  On success the sequence is stored back
 * through pseq; cb_fn is invoked when this step completes.  Returns -ENOMEM
 * when the sequence or task pool is exhausted.
 */
int
spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
		       struct iovec *src_iovs, uint32_t src_iovcnt,
		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		/* Only release the sequence if this call allocated it */
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->flags = flags;
	task->op_code = ACCEL_OPC_COPY;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}
922 
/* Append a fill operation (byte `pattern` over len bytes at buf) to the
 * sequence at *pseq, allocating a new sequence first if *pseq is NULL.
 * Returns -ENOMEM when the sequence or task pool is exhausted.
 */
int
spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       void *buf, uint64_t len,
		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		/* Only release the sequence if this call allocated it */
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	/* Replicate the fill byte across the task's 64-bit pattern field */
	memset(&task->fill_pattern, pattern, sizeof(uint64_t));

	task->d.iovs = &task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	task->d.iovs[0].iov_base = buf;
	task->d.iovs[0].iov_len = len;
	task->d.iovcnt = 1;
	task->nbytes = len;
	task->src_domain = NULL;
	task->dst_domain = domain;
	task->dst_domain_ctx = domain_ctx;
	task->flags = flags;
	task->op_code = ACCEL_OPC_FILL;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}
968 
969 int
970 spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
971 			     struct iovec *dst_iovs, size_t dst_iovcnt,
972 			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
973 			     struct iovec *src_iovs, size_t src_iovcnt,
974 			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
975 			     int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
976 {
977 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
978 	struct spdk_accel_task *task;
979 	struct spdk_accel_sequence *seq = *pseq;
980 
981 	if (seq == NULL) {
982 		seq = accel_sequence_get(accel_ch);
983 		if (spdk_unlikely(seq == NULL)) {
984 			return -ENOMEM;
985 		}
986 	}
987 
988 	assert(seq->ch == accel_ch);
989 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
990 	if (spdk_unlikely(task == NULL)) {
991 		if (*pseq == NULL) {
992 			accel_sequence_put(seq);
993 		}
994 
995 		return -ENOMEM;
996 	}
997 
998 	/* TODO: support output_size for chaining */
999 	task->output_size = NULL;
1000 	task->dst_domain = dst_domain;
1001 	task->dst_domain_ctx = dst_domain_ctx;
1002 	task->d.iovs = dst_iovs;
1003 	task->d.iovcnt = dst_iovcnt;
1004 	task->src_domain = src_domain;
1005 	task->src_domain_ctx = src_domain_ctx;
1006 	task->s.iovs = src_iovs;
1007 	task->s.iovcnt = src_iovcnt;
1008 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1009 	task->flags = flags;
1010 	task->op_code = ACCEL_OPC_DECOMPRESS;
1011 
1012 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1013 	*pseq = seq;
1014 
1015 	return 0;
1016 }
1017 
1018 int
1019 spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1020 			  struct spdk_accel_crypto_key *key,
1021 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
1022 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1023 			  struct iovec *src_iovs, uint32_t src_iovcnt,
1024 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1025 			  uint64_t iv, uint32_t block_size, int flags,
1026 			  spdk_accel_step_cb cb_fn, void *cb_arg)
1027 {
1028 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1029 	struct spdk_accel_task *task;
1030 	struct spdk_accel_sequence *seq = *pseq;
1031 
1032 	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);
1033 
1034 	if (seq == NULL) {
1035 		seq = accel_sequence_get(accel_ch);
1036 		if (spdk_unlikely(seq == NULL)) {
1037 			return -ENOMEM;
1038 		}
1039 	}
1040 
1041 	assert(seq->ch == accel_ch);
1042 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1043 	if (spdk_unlikely(task == NULL)) {
1044 		if (*pseq == NULL) {
1045 			accel_sequence_put(seq);
1046 		}
1047 
1048 		return -ENOMEM;
1049 	}
1050 
1051 	task->crypto_key = key;
1052 	task->src_domain = src_domain;
1053 	task->src_domain_ctx = src_domain_ctx;
1054 	task->s.iovs = src_iovs;
1055 	task->s.iovcnt = src_iovcnt;
1056 	task->dst_domain = dst_domain;
1057 	task->dst_domain_ctx = dst_domain_ctx;
1058 	task->d.iovs = dst_iovs;
1059 	task->d.iovcnt = dst_iovcnt;
1060 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1061 	task->iv = iv;
1062 	task->block_size = block_size;
1063 	task->flags = flags;
1064 	task->op_code = ACCEL_OPC_ENCRYPT;
1065 
1066 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1067 	*pseq = seq;
1068 
1069 	return 0;
1070 }
1071 
1072 int
1073 spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1074 			  struct spdk_accel_crypto_key *key,
1075 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
1076 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1077 			  struct iovec *src_iovs, uint32_t src_iovcnt,
1078 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1079 			  uint64_t iv, uint32_t block_size, int flags,
1080 			  spdk_accel_step_cb cb_fn, void *cb_arg)
1081 {
1082 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1083 	struct spdk_accel_task *task;
1084 	struct spdk_accel_sequence *seq = *pseq;
1085 
1086 	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);
1087 
1088 	if (seq == NULL) {
1089 		seq = accel_sequence_get(accel_ch);
1090 		if (spdk_unlikely(seq == NULL)) {
1091 			return -ENOMEM;
1092 		}
1093 	}
1094 
1095 	assert(seq->ch == accel_ch);
1096 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1097 	if (spdk_unlikely(task == NULL)) {
1098 		if (*pseq == NULL) {
1099 			accel_sequence_put(seq);
1100 		}
1101 
1102 		return -ENOMEM;
1103 	}
1104 
1105 	task->crypto_key = key;
1106 	task->src_domain = src_domain;
1107 	task->src_domain_ctx = src_domain_ctx;
1108 	task->s.iovs = src_iovs;
1109 	task->s.iovcnt = src_iovcnt;
1110 	task->dst_domain = dst_domain;
1111 	task->dst_domain_ctx = dst_domain_ctx;
1112 	task->d.iovs = dst_iovs;
1113 	task->d.iovcnt = dst_iovcnt;
1114 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1115 	task->iv = iv;
1116 	task->block_size = block_size;
1117 	task->flags = flags;
1118 	task->op_code = ACCEL_OPC_DECRYPT;
1119 
1120 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1121 	*pseq = seq;
1122 
1123 	return 0;
1124 }
1125 
1126 int
1127 spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1128 			 uint32_t *dst, struct iovec *iovs, uint32_t iovcnt,
1129 			 struct spdk_memory_domain *domain, void *domain_ctx,
1130 			 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg)
1131 {
1132 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1133 	struct spdk_accel_task *task;
1134 	struct spdk_accel_sequence *seq = *pseq;
1135 
1136 	if (seq == NULL) {
1137 		seq = accel_sequence_get(accel_ch);
1138 		if (spdk_unlikely(seq == NULL)) {
1139 			return -ENOMEM;
1140 		}
1141 	}
1142 
1143 	assert(seq->ch == accel_ch);
1144 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1145 	if (spdk_unlikely(task == NULL)) {
1146 		if (*pseq == NULL) {
1147 			accel_sequence_put(seq);
1148 		}
1149 
1150 		return -ENOMEM;
1151 	}
1152 
1153 	task->s.iovs = iovs;
1154 	task->s.iovcnt = iovcnt;
1155 	task->src_domain = domain;
1156 	task->src_domain_ctx = domain_ctx;
1157 	task->nbytes = accel_get_iovlen(iovs, iovcnt);
1158 	task->crc_dst = dst;
1159 	task->seed = seed;
1160 	task->op_code = ACCEL_OPC_CRC32C;
1161 	task->dst_domain = NULL;
1162 
1163 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1164 	*pseq = seq;
1165 
1166 	return 0;
1167 }
1168 
1169 int
1170 spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
1171 		   struct spdk_memory_domain **domain, void **domain_ctx)
1172 {
1173 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1174 	struct accel_buffer *accel_buf;
1175 
1176 	accel_buf = accel_get_buf(accel_ch, len);
1177 	if (spdk_unlikely(accel_buf == NULL)) {
1178 		return -ENOMEM;
1179 	}
1180 
1181 	/* We always return the same pointer and identify the buffers through domain_ctx */
1182 	*buf = ACCEL_BUFFER_BASE;
1183 	*domain_ctx = accel_buf;
1184 	*domain = g_accel_domain;
1185 
1186 	return 0;
1187 }
1188 
1189 void
1190 spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
1191 		   struct spdk_memory_domain *domain, void *domain_ctx)
1192 {
1193 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1194 	struct accel_buffer *accel_buf = domain_ctx;
1195 
1196 	assert(domain == g_accel_domain);
1197 	assert(buf == ACCEL_BUFFER_BASE);
1198 
1199 	accel_put_buf(accel_ch, accel_buf);
1200 }
1201 
1202 static void
1203 accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
1204 {
1205 	struct spdk_accel_task *task;
1206 	struct accel_io_channel *ch = seq->ch;
1207 	spdk_accel_step_cb cb_fn;
1208 	void *cb_arg;
1209 
1210 	while (!TAILQ_EMPTY(&seq->completed)) {
1211 		task = TAILQ_FIRST(&seq->completed);
1212 		TAILQ_REMOVE(&seq->completed, task, seq_link);
1213 		cb_fn = task->step_cb_fn;
1214 		cb_arg = task->step_cb_arg;
1215 		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
1216 		if (cb_fn != NULL) {
1217 			cb_fn(cb_arg);
1218 		}
1219 	}
1220 
1221 	while (!TAILQ_EMPTY(&seq->tasks)) {
1222 		task = TAILQ_FIRST(&seq->tasks);
1223 		TAILQ_REMOVE(&seq->tasks, task, seq_link);
1224 		cb_fn = task->step_cb_fn;
1225 		cb_arg = task->step_cb_arg;
1226 		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
1227 		if (cb_fn != NULL) {
1228 			cb_fn(cb_arg);
1229 		}
1230 	}
1231 }
1232 
/* Finish a sequence: update stats, notify all per-task callbacks, then the sequence-level
 * callback, and finally return the sequence object to its channel's pool. */
static void
accel_sequence_complete(struct spdk_accel_sequence *seq)
{
	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, seq->status);

	/* Every finished sequence counts as executed; failures are counted on top of that */
	accel_update_stats(seq->ch, sequence_executed, 1);
	if (spdk_unlikely(seq->status != 0)) {
		accel_update_stats(seq->ch, sequence_failed, 1);
	}

	/* First notify all users that appended operations to this sequence */
	accel_sequence_complete_tasks(seq);

	/* Then notify the user that finished the sequence */
	seq->cb_fn(seq->cb_arg, seq->status);

	accel_sequence_put(seq);
}
1251 
1252 static void
1253 accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
1254 {
1255 	uintptr_t offset;
1256 
1257 	offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
1258 	assert(offset < accel_buf->len);
1259 
1260 	diov->iov_base = (char *)accel_buf->buf + offset;
1261 	diov->iov_len = siov->iov_len;
1262 }
1263 
static void
accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
{
	struct spdk_accel_task *task;
	struct iovec *iov;

	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
	 * in a sequence that were using it.
	 */
	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
			iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
			/* Virtual accel buffers are always described by a single iovec */
			assert(task->s.iovcnt == 1);
			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
			/* Clear the domain so later stages treat this as a regular local buffer */
			task->src_domain = NULL;
			task->s.iovs = iov;
		}
		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
			iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
			assert(task->d.iovcnt == 1);
			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
			task->dst_domain = NULL;
			task->d.iovs = iov;
		}
	}
}
1290 
1291 static void accel_process_sequence(struct spdk_accel_sequence *seq);
1292 
/* iobuf callback: invoked once a data buffer becomes available for a virtual accel buffer that
 * a sequence was waiting on (AWAIT_VIRTBUF); rewires the tasks and resumes the sequence. */
static void
accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	/* Go back to CHECK_VIRTBUF, since the task may reference more than one virtual buffer */
	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_process_sequence(accel_buf->seq);
}
1309 
1310 static bool
1311 accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
1312 			 spdk_iobuf_get_cb cb_fn)
1313 {
1314 	struct accel_io_channel *ch = seq->ch;
1315 
1316 	assert(buf->buf == NULL);
1317 	assert(buf->seq == NULL);
1318 
1319 	buf->seq = seq;
1320 	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
1321 	if (buf->buf == NULL) {
1322 		return false;
1323 	}
1324 
1325 	return true;
1326 }
1327 
/* Make sure any virtual accel buffers referenced by the task are backed by real memory.
 * Returns false when an allocation must wait for an iobuf - in that case
 * accel_iobuf_get_virtbuf_cb() resumes the sequence once a buffer is available. */
static bool
accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
	 * NULL */
	if (task->src_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		/* Buffer was available immediately - patch up the tasks right away */
		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
	}

	if (task->dst_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
	}

	return true;
}
1353 
/* iobuf callback for buffers requested through spdk_accel_alloc_sequence_buf(): rewires the
 * sequence's tasks and notifies the user's callback stored on the accel_buffer.  Unlike
 * accel_iobuf_get_virtbuf_cb(), the sequence state machine is not advanced here. */
static void
accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
}
1368 
1369 bool
1370 spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
1371 			      struct spdk_memory_domain *domain, void *domain_ctx,
1372 			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
1373 {
1374 	struct accel_buffer *accel_buf = domain_ctx;
1375 
1376 	assert(domain == g_accel_domain);
1377 	accel_buf->cb_fn = cb_fn;
1378 	accel_buf->cb_ctx = cb_ctx;
1379 
1380 	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
1381 		return false;
1382 	}
1383 
1384 	accel_sequence_set_virtbuf(seq, accel_buf);
1385 
1386 	return true;
1387 }
1388 
/* Returns the first not-yet-completed task in a sequence, or NULL if none remain. */
struct spdk_accel_task *
spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
{
	return TAILQ_FIRST(&seq->tasks);
}
1394 
/* Returns the task following `task` within its sequence, or NULL at the end of the list. */
struct spdk_accel_task *
spdk_accel_sequence_next_task(struct spdk_accel_task *task)
{
	return TAILQ_NEXT(task, seq_link);
}
1400 
1401 static inline void
1402 accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
1403 			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
1404 			struct accel_buffer *buf)
1405 {
1406 	bounce->orig_iovs = *iovs;
1407 	bounce->orig_iovcnt = *iovcnt;
1408 	bounce->orig_domain = *domain;
1409 	bounce->orig_domain_ctx = *domain_ctx;
1410 	bounce->iov.iov_base = buf->buf;
1411 	bounce->iov.iov_len = buf->len;
1412 
1413 	*iovs = &bounce->iov;
1414 	*iovcnt = 1;
1415 	*domain = NULL;
1416 }
1417 
/* iobuf callback: a source bounce buffer became available for the sequence's current (first)
 * task; attach it and resume processing from CHECK_BOUNCEBUF. */
static void
accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	/* Bounce buffers are only allocated for the task currently being processed */
	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
				&task->src_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}
1437 
/* iobuf callback: a destination bounce buffer became available for the sequence's current
 * (first) task; attach it and resume processing from CHECK_BOUNCEBUF. */
static void
accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	/* Bounce buffers are only allocated for the task currently being processed */
	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
				&task->dst_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}
1457 
/* Allocate local bounce buffers for a task whose src/dst live in a foreign memory domain that
 * the executing module can't access directly.  Returns 0 on success, -EAGAIN when the sequence
 * must wait for an iobuf (the bounce callbacks will resume it), or -ENOMEM on failure. */
static int
accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_buffer *buf;

	if (task->src_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->src_domain != g_accel_domain);

		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
		if (buf == NULL) {
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		/* Track the buffer on the sequence so it's released when the sequence completes */
		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt,
					&task->src_domain, &task->src_domain_ctx, buf);
	}

	if (task->dst_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->dst_domain != g_accel_domain);

		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
		if (buf == NULL) {
			/* The src buffer will be released when a sequence is completed */
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt,
					&task->dst_domain, &task->dst_domain_ctx, buf);
	}

	return 0;
}
1504 
1505 static void
1506 accel_task_pull_data_cb(void *ctx, int status)
1507 {
1508 	struct spdk_accel_sequence *seq = ctx;
1509 
1510 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1511 	if (spdk_likely(status == 0)) {
1512 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1513 	} else {
1514 		accel_sequence_set_fail(seq, status);
1515 	}
1516 
1517 	accel_process_sequence(seq);
1518 }
1519 
/* Copy the task's source data from its original memory domain into the local src bounce
 * buffer, so a module without memory-domain support can process it.  On submission failure the
 * sequence is failed immediately; otherwise accel_task_pull_data_cb() advances the sequence. */
static void
accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	/* Only reached after a src bounce buffer was attached to the task */
	assert(task->bounce.s.orig_iovs != NULL);
	assert(task->bounce.s.orig_domain != NULL);
	assert(task->bounce.s.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_pull_data(task->bounce.s.orig_domain,
					  task->bounce.s.orig_domain_ctx,
					  task->bounce.s.orig_iovs, task->bounce.s.orig_iovcnt,
					  task->s.iovs, task->s.iovcnt,
					  accel_task_pull_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->bounce.s.orig_domain), rc);
		accel_sequence_set_fail(seq, rc);
	}
}
1541 
1542 static void
1543 accel_task_push_data_cb(void *ctx, int status)
1544 {
1545 	struct spdk_accel_sequence *seq = ctx;
1546 
1547 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
1548 	if (spdk_likely(status == 0)) {
1549 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
1550 	} else {
1551 		accel_sequence_set_fail(seq, status);
1552 	}
1553 
1554 	accel_process_sequence(seq);
1555 }
1556 
1557 static void
1558 accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1559 {
1560 	int rc;
1561 
1562 	assert(task->bounce.d.orig_iovs != NULL);
1563 	assert(task->bounce.d.orig_domain != NULL);
1564 	assert(task->bounce.d.orig_domain != g_accel_domain);
1565 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
1566 
1567 	rc = spdk_memory_domain_push_data(task->bounce.d.orig_domain,
1568 					  task->bounce.d.orig_domain_ctx,
1569 					  task->bounce.d.orig_iovs, task->bounce.d.orig_iovcnt,
1570 					  task->d.iovs, task->d.iovcnt,
1571 					  accel_task_push_data_cb, seq);
1572 	if (spdk_unlikely(rc != 0)) {
1573 		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
1574 			    spdk_memory_domain_get_dma_device_id(task->bounce.s.orig_domain), rc);
1575 		accel_sequence_set_fail(seq, rc);
1576 	}
1577 }
1578 
/* Main sequence state machine.  Advances the sequence through virtual buffer allocation,
 * bounce buffer allocation, memory-domain data movement and task execution until either all
 * tasks are done, an asynchronous operation is pending (one of the AWAIT_* states), or an
 * error occurs.  Re-entrant calls (e.g. from a synchronous completion callback) are flattened
 * via the in_process_sequence flag. */
static void
accel_process_sequence(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *accel_ch = seq->ch;
	struct spdk_accel_task *task;
	enum accel_sequence_state state;
	int rc;

	/* Prevent recursive calls to this function */
	if (spdk_unlikely(seq->in_process_sequence)) {
		return;
	}
	seq->in_process_sequence = true;

	task = TAILQ_FIRST(&seq->tasks);
	/* Keep iterating until the state stops changing, i.e. we're waiting for an async event */
	do {
		state = seq->state;
		switch (state) {
		case ACCEL_SEQUENCE_STATE_INIT:
			if (g_accel_driver != NULL) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC);
				break;
			}
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
			if (!accel_sequence_check_virtbuf(seq, task)) {
				/* We couldn't allocate a buffer, wait until one is available */
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
			/* If a module supports memory domains, we don't need to allocate bounce
			 * buffers */
			if (g_modules_opc[task->op_code].supports_memory_domains) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
			rc = accel_sequence_check_bouncebuf(seq, task);
			if (spdk_unlikely(rc != 0)) {
				/* We couldn't allocate a buffer, wait until one is available */
				if (rc == -EAGAIN) {
					break;
				}
				accel_sequence_set_fail(seq, rc);
				break;
			}
			/* A src bounce buffer was attached, so data must be pulled in first */
			if (task->s.iovs == &task->bounce.s.iov) {
				assert(task->bounce.s.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
				      g_opcode_strings[task->op_code], seq);

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
			rc = accel_submit_task(accel_ch, task);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
					    g_opcode_strings[task->op_code], seq);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_PULL_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
			accel_task_pull_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
			/* A dst bounce buffer was used, so results must be pushed back out */
			if (task->d.iovs == &task->bounce.d.iov) {
				assert(task->bounce.d.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
			break;
		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
			accel_task_push_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
			TAILQ_REMOVE(&seq->tasks, task, seq_link);
			TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
			/* Check if there are any remaining tasks */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC:
			assert(!TAILQ_EMPTY(&seq->tasks));

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK);
			rc = g_accel_driver->execute_sequence(accel_ch->driver_channel, seq);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
					    seq, g_accel_driver->name);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE:
			/* Get the task again, as the driver might have completed some tasks
			 * synchronously */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			/* We don't want to execute the next task through the driver, so we
			 * explicitly omit the INIT state here */
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
			break;
		case ACCEL_SEQUENCE_STATE_ERROR:
			/* Immediately return here to make sure we don't touch the sequence
			 * after it's completed */
			assert(seq->status != 0);
			accel_sequence_complete(seq);
			return;
		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK:
			break;
		default:
			assert(0 && "bad state");
			break;
		}
	} while (seq->state != state);

	seq->in_process_sequence = false;
}
1722 
/* Completion callback for tasks submitted to accel modules on behalf of a sequence.  Reclaims
 * the task from the channel's task pool (where spdk_accel_task_complete() put it) and advances
 * the sequence state machine, or - when a driver is executing the sequence - records the
 * result and hands control back to the driver. */
static void
accel_sequence_task_cb(void *cb_arg, int status)
{
	struct spdk_accel_sequence *seq = cb_arg;
	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);
	struct accel_io_channel *accel_ch = seq->ch;

	/* spdk_accel_task_complete() puts the task back to the task pool, but we don't want to do
	 * that if a task is part of a sequence.  Removing the task from that pool here is the
	 * easiest way to prevent this, even though it is a bit hacky.
	 */
	assert(task != NULL);
	TAILQ_REMOVE(&accel_ch->task_pool, task, link);

	switch (seq->state) {
	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
				    g_opcode_strings[task->op_code], seq);
			accel_sequence_set_fail(seq, status);
		}

		accel_process_sequence(seq);
		break;
	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK:
		assert(g_accel_driver != NULL);
		/* Immediately remove the task from the outstanding list to make sure the next call
		 * to spdk_accel_sequence_first_task() doesn't return it */
		TAILQ_REMOVE(&seq->tasks, task, seq_link);
		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);

		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
				    g_accel_driver->name);
			/* Update status without using accel_sequence_set_fail() to avoid changing
			 * seq's state to ERROR until driver calls spdk_accel_sequence_continue() */
			seq->status = status;
		}
		break;
	default:
		assert(0 && "bad state");
		break;
	}
}
1769 
1770 void
1771 spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
1772 {
1773 	assert(g_accel_driver != NULL);
1774 	assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK);
1775 
1776 	if (spdk_likely(seq->status == 0)) {
1777 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE);
1778 	} else {
1779 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
1780 	}
1781 
1782 	accel_process_sequence(seq);
1783 }
1784 
static bool
accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
{
	/* For now, just do a dumb check that the iovec arrays are exactly the same: equal length
	 * and byte-identical elements. */
	return iovacnt == iovbcnt &&
	       memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
}
1795 
/* Try to redirect `task`'s destination to `next`'s (a copy operation) destination, allowing
 * the copy to be elided.  Returns true if the substitution was made.  For crc32c, which has no
 * destination buffer, the substitution is applied to its source, which requires recursively
 * updating the previous task's destination as well. */
static bool
accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next)
{
	struct spdk_accel_task *prev;

	switch (task->op_code) {
	case ACCEL_OPC_DECOMPRESS:
	case ACCEL_OPC_FILL:
	case ACCEL_OPC_ENCRYPT:
	case ACCEL_OPC_DECRYPT:
		if (task->dst_domain != next->src_domain) {
			return false;
		}
		/* Only safe when the copy reads exactly what this task writes */
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		task->d.iovs = next->d.iovs;
		task->d.iovcnt = next->d.iovcnt;
		task->dst_domain = next->dst_domain;
		task->dst_domain_ctx = next->dst_domain_ctx;
		break;
	case ACCEL_OPC_CRC32C:
		/* crc32 is special, because it doesn't have a dst buffer */
		if (task->src_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		/* We can only change crc32's buffer if we can change previous task's buffer */
		prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link);
		if (prev == NULL) {
			return false;
		}
		if (!accel_task_set_dstbuf(prev, next)) {
			return false;
		}
		task->s.iovs = next->d.iovs;
		task->s.iovcnt = next->d.iovcnt;
		task->src_domain = next->dst_domain;
		task->src_domain_ctx = next->dst_domain_ctx;
		break;
	default:
		return false;
	}

	return true;
}
1846 
/* Try to merge `task` with the following task `next`, eliding a copy operation when one of the
 * two is a copy whose buffers line up exactly.  The merged-away task is moved to the completed
 * list; *next_task is updated when `next` itself is removed so the caller's
 * TAILQ_FOREACH_SAFE() iteration stays valid. */
static void
accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
			   struct spdk_accel_task **next_task)
{
	struct spdk_accel_task *next = *next_task;

	switch (task->op_code) {
	case ACCEL_OPC_COPY:
		/* We only allow changing src of operations that actually have a src, e.g. we never
		 * do it for fill.  Theoretically, it is possible, but we'd have to be careful to
		 * change the src of the operation after fill (which in turn could also be a fill).
		 * So, for the sake of simplicity, skip this type of operations for now.
		 */
		if (next->op_code != ACCEL_OPC_DECOMPRESS &&
		    next->op_code != ACCEL_OPC_COPY &&
		    next->op_code != ACCEL_OPC_ENCRYPT &&
		    next->op_code != ACCEL_OPC_DECRYPT &&
		    next->op_code != ACCEL_OPC_CRC32C) {
			break;
		}
		if (task->dst_domain != next->src_domain) {
			break;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			break;
		}
		/* The copy is redundant: make next read from the copy's source directly */
		next->s.iovs = task->s.iovs;
		next->s.iovcnt = task->s.iovcnt;
		next->src_domain = task->src_domain;
		next->src_domain_ctx = task->src_domain_ctx;
		TAILQ_REMOVE(&seq->tasks, task, seq_link);
		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
		break;
	case ACCEL_OPC_DECOMPRESS:
	case ACCEL_OPC_FILL:
	case ACCEL_OPC_ENCRYPT:
	case ACCEL_OPC_DECRYPT:
	case ACCEL_OPC_CRC32C:
		/* We can only merge tasks when one of them is a copy */
		if (next->op_code != ACCEL_OPC_COPY) {
			break;
		}
		if (!accel_task_set_dstbuf(task, next)) {
			break;
		}
		/* We're removing next_task from the tasks queue, so we need to update its pointer,
		 * so that the TAILQ_FOREACH_SAFE() loop below works correctly */
		*next_task = TAILQ_NEXT(next, seq_link);
		TAILQ_REMOVE(&seq->tasks, next, seq_link);
		TAILQ_INSERT_TAIL(&seq->completed, next, seq_link);
		break;
	default:
		assert(0 && "bad opcode");
		break;
	}
}
1904 
1905 void
1906 spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
1907 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
1908 {
1909 	struct spdk_accel_task *task, *next;
1910 
1911 	/* Try to remove any copy operations if possible */
1912 	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
1913 		if (next == NULL) {
1914 			break;
1915 		}
1916 		accel_sequence_merge_tasks(seq, task, &next);
1917 	}
1918 
1919 	seq->cb_fn = cb_fn;
1920 	seq->cb_arg = cb_arg;
1921 
1922 	accel_process_sequence(seq);
1923 }
1924 
1925 void
1926 spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
1927 {
1928 	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
1929 	struct spdk_accel_task *task;
1930 
1931 	assert(TAILQ_EMPTY(&seq->completed));
1932 	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);
1933 
1934 	while (!TAILQ_EMPTY(&tasks)) {
1935 		task = TAILQ_FIRST(&tasks);
1936 		TAILQ_REMOVE(&tasks, task, seq_link);
1937 		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
1938 	}
1939 }
1940 
/* Abort a (possibly NULL) sequence: complete all of its tasks (with their error
 * status) and return the sequence object to the channel's pool. */
void
spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
{
	if (seq == NULL) {
		return;
	}

	accel_sequence_complete_tasks(seq);
	accel_sequence_put(seq);
}
1951 
/* Return the accel framework's memory domain (created in spdk_accel_initialize()). */
struct spdk_memory_domain *
spdk_accel_get_memory_domain(void)
{
	return g_accel_domain;
}
1957 
1958 static struct spdk_accel_module_if *
1959 _module_find_by_name(const char *name)
1960 {
1961 	struct spdk_accel_module_if *accel_module = NULL;
1962 
1963 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
1964 		if (strcmp(name, accel_module->name) == 0) {
1965 			break;
1966 		}
1967 	}
1968 
1969 	return accel_module;
1970 }
1971 
1972 static inline struct spdk_accel_crypto_key *
1973 _accel_crypto_key_get(const char *name)
1974 {
1975 	struct spdk_accel_crypto_key *key;
1976 
1977 	assert(spdk_spin_held(&g_keyring_spin));
1978 
1979 	TAILQ_FOREACH(key, &g_keyring, link) {
1980 		if (strcmp(name, key->param.key_name) == 0) {
1981 			return key;
1982 		}
1983 	}
1984 
1985 	return NULL;
1986 }
1987 
/* Release all memory owned by a crypto key.  Key material (both the hex-encoded
 * and the raw forms) is scrubbed with spdk_memset_s() before being freed so the
 * secrets don't linger in freed heap memory; spdk_memset_s() is used instead of
 * plain memset() so the scrub cannot be optimized away. */
static void
accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
{
	if (key->param.hex_key) {
		/* Hex encoding is two characters per key byte. */
		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
		free(key->param.hex_key);
	}
	if (key->param.hex_key2) {
		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
		free(key->param.hex_key2);
	}
	free(key->param.tweak_mode);
	free(key->param.key_name);
	free(key->param.cipher);
	if (key->key) {
		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
		free(key->key);
	}
	if (key->key2) {
		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
		free(key->key2);
	}
	free(key);
}
2012 
/* Tear down a crypto key: let the owning module release its per-key state, then
 * scrub and free the key's memory.  "Unsafe" because the caller is responsible
 * for having already removed the key from the keyring / holding off lookups. */
static void
accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
{
	assert(key->module_if);
	assert(key->module_if->crypto_key_deinit);

	key->module_if->crypto_key_deinit(key);
	accel_crypto_key_free_mem(key);
}
2022 
/*
 * This function mitigates a timing side channel which could be caused by using strcmp()
 * Please refer to chapter "Mitigating Information Leakage Based on Variable Timing" in
 * the article [1] for more details
 * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
 */
static bool
accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
{
	/* volatile prevents the compiler from short-circuiting the accumulation;
	 * a length mismatch makes the accumulator non-zero from the start. */
	volatile size_t diff = k1_len ^ k2_len;
	size_t idx;

	/* Bitwise '&' (not '&&') is deliberate: both bounds are evaluated every
	 * iteration so the loop has no data-dependent early exit. */
	for (idx = 0; ((idx < k1_len) & (idx < k2_len)); idx++) {
		diff |= k1[idx] ^ k2[idx];
	}

	return diff == 0;
}
2041 
/* Textual names for enum spdk_accel_crypto_tweak_mode, indexed by enum value;
 * used both for parsing user input and for error messages. */
static const char *g_tweak_modes[] = {
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA] = "SIMPLE_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_JOIN_NEG_LBA_WITH_LBA] = "JOIN_NEG_LBA_WITH_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_FULL_LBA] = "INCR_512_FULL_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_UPPER_LBA] = "INCR_512_UPPER_LBA",
};
2048 
/* Textual names for enum spdk_accel_cipher, indexed by enum value. */
static const char *g_ciphers[] = {
	[SPDK_ACCEL_CIPHER_AES_CBC] = "AES_CBC",
	[SPDK_ACCEL_CIPHER_AES_XTS] = "AES_XTS",
};
2053 
2054 int
2055 spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
2056 {
2057 	struct spdk_accel_module_if *module;
2058 	struct spdk_accel_crypto_key *key;
2059 	size_t hex_key_size, hex_key2_size;
2060 	bool found = false;
2061 	size_t i;
2062 	int rc;
2063 
2064 	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
2065 		return -EINVAL;
2066 	}
2067 
2068 	if (g_modules_opc[ACCEL_OPC_ENCRYPT].module != g_modules_opc[ACCEL_OPC_DECRYPT].module) {
2069 		/* hardly ever possible, but let's check and warn the user */
2070 		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
2071 	}
2072 	module = g_modules_opc[ACCEL_OPC_ENCRYPT].module;
2073 
2074 	if (!module) {
2075 		SPDK_ERRLOG("No accel module found assigned for crypto operation\n");
2076 		return -ENOENT;
2077 	}
2078 
2079 	if (!module->crypto_key_init || !module->crypto_supports_cipher) {
2080 		SPDK_ERRLOG("Module %s doesn't support crypto operations\n", module->name);
2081 		return -ENOTSUP;
2082 	}
2083 
2084 	key = calloc(1, sizeof(*key));
2085 	if (!key) {
2086 		return -ENOMEM;
2087 	}
2088 
2089 	key->param.key_name = strdup(param->key_name);
2090 	if (!key->param.key_name) {
2091 		rc = -ENOMEM;
2092 		goto error;
2093 	}
2094 
2095 	for (i = 0; i < SPDK_COUNTOF(g_ciphers); ++i) {
2096 		assert(g_ciphers[i]);
2097 
2098 		if (strncmp(param->cipher, g_ciphers[i], strlen(g_ciphers[i])) == 0) {
2099 			key->cipher = i;
2100 			found = true;
2101 			break;
2102 		}
2103 	}
2104 
2105 	if (!found) {
2106 		SPDK_ERRLOG("Failed to parse cipher\n");
2107 		rc = -EINVAL;
2108 		goto error;
2109 	}
2110 
2111 	key->param.cipher = strdup(param->cipher);
2112 	if (!key->param.cipher) {
2113 		rc = -ENOMEM;
2114 		goto error;
2115 	}
2116 
2117 	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2118 	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
2119 		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2120 		rc = -EINVAL;
2121 		goto error;
2122 	}
2123 
2124 	if (hex_key_size == 0) {
2125 		SPDK_ERRLOG("key1 size cannot be 0\n");
2126 		rc = -EINVAL;
2127 		goto error;
2128 	}
2129 
2130 	key->param.hex_key = strdup(param->hex_key);
2131 	if (!key->param.hex_key) {
2132 		rc = -ENOMEM;
2133 		goto error;
2134 	}
2135 
2136 	key->key_size = hex_key_size / 2;
2137 	key->key = spdk_unhexlify(key->param.hex_key);
2138 	if (!key->key) {
2139 		SPDK_ERRLOG("Failed to unhexlify key1\n");
2140 		rc = -EINVAL;
2141 		goto error;
2142 	}
2143 
2144 	if (param->hex_key2) {
2145 		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2146 		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
2147 			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2148 			rc = -EINVAL;
2149 			goto error;
2150 		}
2151 
2152 		if (hex_key2_size == 0) {
2153 			SPDK_ERRLOG("key2 size cannot be 0\n");
2154 			rc = -EINVAL;
2155 			goto error;
2156 		}
2157 
2158 		key->param.hex_key2 = strdup(param->hex_key2);
2159 		if (!key->param.hex_key2) {
2160 			rc = -ENOMEM;
2161 			goto error;
2162 		}
2163 
2164 		key->key2_size = hex_key2_size / 2;
2165 		key->key2 = spdk_unhexlify(key->param.hex_key2);
2166 		if (!key->key2) {
2167 			SPDK_ERRLOG("Failed to unhexlify key2\n");
2168 			rc = -EINVAL;
2169 			goto error;
2170 		}
2171 	}
2172 
2173 	key->tweak_mode = ACCEL_CRYPTO_TWEAK_MODE_DEFAULT;
2174 	if (param->tweak_mode) {
2175 		found = false;
2176 
2177 		key->param.tweak_mode = strdup(param->tweak_mode);
2178 		if (!key->param.tweak_mode) {
2179 			rc = -ENOMEM;
2180 			goto error;
2181 		}
2182 
2183 		for (i = 0; i < SPDK_COUNTOF(g_tweak_modes); ++i) {
2184 			assert(g_tweak_modes[i]);
2185 
2186 			if (strncmp(param->tweak_mode, g_tweak_modes[i], strlen(g_tweak_modes[i])) == 0) {
2187 				key->tweak_mode = i;
2188 				found = true;
2189 				break;
2190 			}
2191 		}
2192 
2193 		if (!found) {
2194 			SPDK_ERRLOG("Failed to parse tweak mode\n");
2195 			rc = -EINVAL;
2196 			goto error;
2197 		}
2198 	}
2199 
2200 	if ((!module->crypto_supports_tweak_mode && key->tweak_mode != ACCEL_CRYPTO_TWEAK_MODE_DEFAULT) ||
2201 	    (module->crypto_supports_tweak_mode && !module->crypto_supports_tweak_mode(key->tweak_mode))) {
2202 		SPDK_ERRLOG("Module %s doesn't support %s tweak mode\n", module->name,
2203 			    g_tweak_modes[key->tweak_mode]);
2204 		rc = -EINVAL;
2205 		goto error;
2206 	}
2207 
2208 	if (!module->crypto_supports_cipher(key->cipher, key->key_size)) {
2209 		SPDK_ERRLOG("Module %s doesn't support %s cipher with %zu key size\n", module->name,
2210 			    g_ciphers[key->cipher], key->key_size);
2211 		rc = -EINVAL;
2212 		goto error;
2213 	}
2214 
2215 	if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) {
2216 		if (!key->key2) {
2217 			SPDK_ERRLOG("%s key2 is missing\n", g_ciphers[key->cipher]);
2218 			rc = -EINVAL;
2219 			goto error;
2220 		}
2221 
2222 		if (key->key_size != key->key2_size) {
2223 			SPDK_ERRLOG("%s key size %zu is not equal to key2 size %zu\n", g_ciphers[key->cipher],
2224 				    key->key_size,
2225 				    key->key2_size);
2226 			rc = -EINVAL;
2227 			goto error;
2228 		}
2229 
2230 		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
2231 			SPDK_ERRLOG("%s identical keys are not secure\n", g_ciphers[key->cipher]);
2232 			rc = -EINVAL;
2233 			goto error;
2234 		}
2235 	}
2236 
2237 	if (key->cipher == SPDK_ACCEL_CIPHER_AES_CBC) {
2238 		if (key->key2_size) {
2239 			SPDK_ERRLOG("%s doesn't use key2\n", g_ciphers[key->cipher]);
2240 			rc = -EINVAL;
2241 			goto error;
2242 		}
2243 	}
2244 
2245 	key->module_if = module;
2246 
2247 	spdk_spin_lock(&g_keyring_spin);
2248 	if (_accel_crypto_key_get(param->key_name)) {
2249 		rc = -EEXIST;
2250 	} else {
2251 		rc = module->crypto_key_init(key);
2252 		if (rc) {
2253 			SPDK_ERRLOG("Module %s failed to initialize crypto key\n", module->name);
2254 		} else {
2255 			TAILQ_INSERT_TAIL(&g_keyring, key, link);
2256 		}
2257 	}
2258 	spdk_spin_unlock(&g_keyring_spin);
2259 
2260 	if (rc) {
2261 		goto error;
2262 	}
2263 
2264 	return 0;
2265 
2266 error:
2267 	accel_crypto_key_free_mem(key);
2268 	return rc;
2269 }
2270 
2271 int
2272 spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
2273 {
2274 	if (!key || !key->module_if) {
2275 		return -EINVAL;
2276 	}
2277 
2278 	spdk_spin_lock(&g_keyring_spin);
2279 	if (!_accel_crypto_key_get(key->param.key_name)) {
2280 		spdk_spin_unlock(&g_keyring_spin);
2281 		return -ENOENT;
2282 	}
2283 	TAILQ_REMOVE(&g_keyring, key, link);
2284 	spdk_spin_unlock(&g_keyring_spin);
2285 
2286 	accel_crypto_key_destroy_unsafe(key);
2287 
2288 	return 0;
2289 }
2290 
/* Look up a crypto key by name, taking the keyring lock for the duration of the
 * search.  Returns NULL when no key with that name exists.  NOTE: the returned
 * pointer is not reference counted; it remains valid only as long as the key is
 * not destroyed. */
struct spdk_accel_crypto_key *
spdk_accel_crypto_key_get(const char *name)
{
	struct spdk_accel_crypto_key *key;

	spdk_spin_lock(&g_keyring_spin);
	key = _accel_crypto_key_get(name);
	spdk_spin_unlock(&g_keyring_spin);

	return key;
}
2302 
/* Helper function when accel modules register with the framework. */
void
spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
{
	/* Duplicate registrations are a programming error; assert in debug builds,
	 * ignore in release builds. */
	if (_module_find_by_name(accel_module->name)) {
		SPDK_NOTICELOG("Module %s already registered\n", accel_module->name);
		assert(false);
		return;
	}

	/* Make sure that the software module is at the head of the list, this
	 * will assure that all opcodes are later assigned to software first and
	 * then updated to HW modules as they are registered.
	 */
	if (strcmp(accel_module->name, "software") == 0) {
		TAILQ_INSERT_HEAD(&spdk_accel_module_list, accel_module, tailq);
	} else {
		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
	}

	/* Track the largest per-task context any module needs; the per-channel task
	 * pool is sized by this value.  get_ctx_size is optional. */
	if (accel_module->get_ctx_size && accel_module->get_ctx_size() > g_max_accel_module_size) {
		g_max_accel_module_size = accel_module->get_ctx_size();
	}
}
2327 
/* Framework level channel create callback. */
static int
accel_create_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel	*accel_ch = ctx_buf;
	struct spdk_accel_task *accel_task;
	struct spdk_accel_sequence *seq;
	struct accel_buffer *buf;
	uint8_t *task_mem;
	uint32_t i = 0, j;
	int rc;

	/* Task entries are g_max_accel_module_size bytes each so any module's
	 * per-task context fits behind the generic spdk_accel_task header. */
	accel_ch->task_pool_base = calloc(g_opts.task_count, g_max_accel_module_size);
	if (accel_ch->task_pool_base == NULL) {
		return -ENOMEM;
	}

	accel_ch->seq_pool_base = calloc(g_opts.sequence_count, sizeof(struct spdk_accel_sequence));
	if (accel_ch->seq_pool_base == NULL) {
		goto err;
	}

	accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer));
	if (accel_ch->buf_pool_base == NULL) {
		goto err;
	}

	TAILQ_INIT(&accel_ch->task_pool);
	TAILQ_INIT(&accel_ch->seq_pool);
	TAILQ_INIT(&accel_ch->buf_pool);

	/* Carve the task pool base into fixed-size entries and link them up. */
	task_mem = accel_ch->task_pool_base;
	for (i = 0; i < g_opts.task_count; i++) {
		accel_task = (struct spdk_accel_task *)task_mem;
		TAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
		task_mem += g_max_accel_module_size;
	}
	for (i = 0; i < g_opts.sequence_count; i++) {
		seq = &accel_ch->seq_pool_base[i];
		TAILQ_INSERT_TAIL(&accel_ch->seq_pool, seq, link);
	}
	for (i = 0; i < g_opts.buf_count; i++) {
		buf = &accel_ch->buf_pool_base[i];
		TAILQ_INSERT_TAIL(&accel_ch->buf_pool, buf, link);
	}

	/* Assign modules and get IO channels for each */
	for (i = 0; i < ACCEL_OPC_LAST; i++) {
		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
		/* This can happen if idxd runs out of channels. */
		if (accel_ch->module_ch[i] == NULL) {
			SPDK_ERRLOG("Module %s failed to get io channel\n", g_modules_opc[i].module->name);
			goto err;
		}
	}

	if (g_accel_driver != NULL) {
		accel_ch->driver_channel = g_accel_driver->get_io_channel();
		if (accel_ch->driver_channel == NULL) {
			SPDK_ERRLOG("Failed to get driver's IO channel\n");
			goto err;
		}
	}

	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size,
				     g_opts.large_cache_size);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
		goto err;
	}

	return 0;
err:
	/* `i` counts how many module channels were acquired: if the module-channel
	 * loop completed, i == ACCEL_OPC_LAST; if an earlier allocation failed, the
	 * last pool loop left i at a value where module_ch[0..i) is all unacquired
	 * (no module channels were taken yet), so the loop below does nothing
	 * harmful only because the module-channel loop resets i before acquiring.
	 * NOTE(review): this relies on `i` being reused across loops — confirm when
	 * touching this error path. */
	if (accel_ch->driver_channel != NULL) {
		spdk_put_io_channel(accel_ch->driver_channel);
	}
	for (j = 0; j < i; j++) {
		spdk_put_io_channel(accel_ch->module_ch[j]);
	}
	free(accel_ch->task_pool_base);
	free(accel_ch->seq_pool_base);
	free(accel_ch->buf_pool_base);

	return -ENOMEM;
}
2413 
2414 static void
2415 accel_add_stats(struct accel_stats *total, struct accel_stats *stats)
2416 {
2417 	int i;
2418 
2419 	total->sequence_executed += stats->sequence_executed;
2420 	total->sequence_failed += stats->sequence_failed;
2421 	for (i = 0; i < ACCEL_OPC_LAST; ++i) {
2422 		total->operations[i].executed += stats->operations[i].executed;
2423 		total->operations[i].failed += stats->operations[i].failed;
2424 		total->operations[i].num_bytes += stats->operations[i].num_bytes;
2425 	}
2426 }
2427 
/* Framework level channel destroy callback. */
static void
accel_destroy_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel	*accel_ch = ctx_buf;
	int i;

	/* Teardown mirrors accel_create_channel(): iobuf cache first, then the
	 * optional driver channel, then every module channel. */
	spdk_iobuf_channel_fini(&accel_ch->iobuf);

	if (accel_ch->driver_channel != NULL) {
		spdk_put_io_channel(accel_ch->driver_channel);
	}

	for (i = 0; i < ACCEL_OPC_LAST; i++) {
		assert(accel_ch->module_ch[i] != NULL);
		spdk_put_io_channel(accel_ch->module_ch[i]);
		accel_ch->module_ch[i] = NULL;
	}

	/* Update global stats to make sure channel's stats aren't lost after a channel is gone */
	spdk_spin_lock(&g_stats_lock);
	accel_add_stats(&g_stats, &accel_ch->stats);
	spdk_spin_unlock(&g_stats_lock);

	free(accel_ch->task_pool_base);
	free(accel_ch->seq_pool_base);
	free(accel_ch->buf_pool_base);
}
2456 
/* Get an accel framework IO channel for the calling thread.  The address of
 * spdk_accel_module_list doubles as the framework's io_device identifier (see
 * spdk_accel_initialize()). */
struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&spdk_accel_module_list);
}
2462 
2463 static int
2464 accel_module_initialize(void)
2465 {
2466 	struct spdk_accel_module_if *accel_module, *tmp_module;
2467 	int rc = 0, module_rc;
2468 
2469 	TAILQ_FOREACH_SAFE(accel_module, &spdk_accel_module_list, tailq, tmp_module) {
2470 		module_rc = accel_module->module_init();
2471 		if (module_rc) {
2472 			SPDK_ERRLOG("Module %s initialization failed with %d\n", accel_module->name, module_rc);
2473 			TAILQ_REMOVE(&spdk_accel_module_list, accel_module, tailq);
2474 			if (!rc) {
2475 				rc = module_rc;
2476 			}
2477 		}
2478 
2479 		SPDK_DEBUGLOG(accel, "Module %s initialized.\n", accel_module->name);
2480 	}
2481 
2482 	return rc;
2483 }
2484 
/* Record whether the module assigned to `opcode` supports memory domains, by
 * probing get_memory_domains() (a positive count means support).  Modules that
 * don't implement the callback are left with supports_memory_domains == false. */
static void
accel_module_init_opcode(enum accel_opcode opcode)
{
	struct accel_module *module = &g_modules_opc[opcode];
	struct spdk_accel_module_if *module_if = module->module;

	if (module_if->get_memory_domains != NULL) {
		/* NULL/0 asks only for the count of supported domains. */
		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
	}
}
2495 
/* Initialize the accel framework: register the io_device, create the accel
 * memory domain, initialize all registered modules and build the opcode ->
 * module assignment table (software first, then HW modules, then any explicit
 * user overrides).  Returns 0 on success or a negative errno.
 * NOTE(review): on failure after spdk_io_device_register()/memory-domain
 * creation, those resources are not torn down here — presumably the caller
 * proceeds to spdk_accel_finish(); confirm before relying on partial-failure
 * cleanup. */
int
spdk_accel_initialize(void)
{
	enum accel_opcode op;
	struct spdk_accel_module_if *accel_module = NULL;
	int rc;

	/*
	 * We need a unique identifier for the accel framework, so use the
	 * spdk_accel_module_list address for this purpose.
	 */
	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
				sizeof(struct accel_io_channel), "accel");

	spdk_spin_init(&g_keyring_spin);
	spdk_spin_init(&g_stats_lock);

	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
				       "SPDK_ACCEL_DMA_DEVICE");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create accel memory domain\n");
		return rc;
	}

	g_modules_started = true;
	rc = accel_module_initialize();
	if (rc) {
		return rc;
	}

	/* Create our priority global map of opcodes to modules, we populate starting
	 * with the software module (guaranteed to be first on the list) and then
	 * updating opcodes with HW modules that have been initialized.
	 * NOTE: all opcodes must be supported by software in the event that no HW
	 * modules are initialized to support the operation.
	 */
	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (op = 0; op < ACCEL_OPC_LAST; op++) {
			if (accel_module->supports_opcode(op)) {
				g_modules_opc[op].module = accel_module;
				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
			}
		}
	}

	/* Now lets check for overrides and apply all that exist */
	for (op = 0; op < ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			accel_module = _module_find_by_name(g_modules_opc_override[op]);
			if (accel_module == NULL) {
				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
				return -EINVAL;
			}
			if (accel_module->supports_opcode(op) == false) {
				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
				return -EINVAL;
			}
			g_modules_opc[op].module = accel_module;
		}
	}

	/* Crypto keys are created against a single module, so encrypt and decrypt
	 * must resolve to the same one. */
	if (g_modules_opc[ACCEL_OPC_ENCRYPT].module != g_modules_opc[ACCEL_OPC_DECRYPT].module) {
		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations");
		return -EINVAL;
	}

	for (op = 0; op < ACCEL_OPC_LAST; op++) {
		assert(g_modules_opc[op].module != NULL);
		accel_module_init_opcode(op);
	}

	rc = spdk_iobuf_register_module("accel");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to register accel iobuf module\n");
		return rc;
	}

	return 0;
}
2575 
/* Invoke the user's finish callback once the last module has finished shutting
 * down, then clear the saved callback state.  The callback pointer is copied to
 * a local first; note the globals are cleared only after the callback returns. */
static void
accel_module_finish_cb(void)
{
	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;

	cb_fn(g_fini_cb_arg);
	g_fini_cb_fn = NULL;
	g_fini_cb_arg = NULL;
}
2585 
/* Emit one "accel_assign_opc" RPC object into the config JSON, recording that
 * `opc_str` was explicitly overridden to be handled by `module_str`. */
static void
accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
			   const char *module_str)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_assign_opc");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "opname", opc_str);
	spdk_json_write_named_string(w, "module", module_str);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}
2598 
/* Write a key's creation parameters as named JSON fields into an already-open
 * object.  Optional fields (key2, tweak_mode) are emitted only when set.  Note
 * this dumps the hex-encoded key material in the clear. */
static void
__accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_named_string(w, "name", key->param.key_name);
	spdk_json_write_named_string(w, "cipher", key->param.cipher);
	spdk_json_write_named_string(w, "key", key->param.hex_key);
	if (key->param.hex_key2) {
		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
	}

	if (key->param.tweak_mode) {
		spdk_json_write_named_string(w, "tweak_mode", key->param.tweak_mode);
	}
}
2613 
/* Dump a single key's parameters as a standalone JSON object (used by RPC
 * listing, as opposed to the config-file form with an RPC envelope). */
void
_accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
}
2621 
/* Write one key as a complete "accel_crypto_key_create" RPC invocation so that
 * replaying the config JSON recreates the key. */
static void
_accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
				    struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
	spdk_json_write_named_object_begin(w, "params");
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}
2633 
/* Emit the current global accel options as an "accel_set_options" RPC object so
 * they are restored on config replay. */
static void
accel_write_options(struct spdk_json_write_ctx *w)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_set_options");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size);
	spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size);
	spdk_json_write_named_uint32(w, "task_count", g_opts.task_count);
	spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count);
	spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}
2648 
2649 static void
2650 _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
2651 {
2652 	struct spdk_accel_crypto_key *key;
2653 
2654 	spdk_spin_lock(&g_keyring_spin);
2655 	TAILQ_FOREACH(key, &g_keyring, link) {
2656 		if (full_dump) {
2657 			_accel_crypto_key_write_config_json(w, key);
2658 		} else {
2659 			_accel_crypto_key_dump_param(w, key);
2660 		}
2661 	}
2662 	spdk_spin_unlock(&g_keyring_spin);
2663 }
2664 
/* Convenience wrapper: dump all keys in parameter-only (non-config) form. */
void
_accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
{
	_accel_crypto_keys_write_config_json(w, false);
}
2670 
/* Write the framework's full configuration (options, each module's config,
 * opcode overrides and crypto keys) into the config JSON array. */
void
spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
{
	struct spdk_accel_module_if *accel_module;
	int i;

	spdk_json_write_array_begin(w);
	accel_write_options(w);

	/* Each module may contribute its own config entries. */
	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		if (accel_module->write_config_json) {
			accel_module->write_config_json(w);
		}
	}
	for (i = 0; i < ACCEL_OPC_LAST; i++) {
		if (g_modules_opc_override[i]) {
			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
		}
	}

	_accel_crypto_keys_write_config_json(w, true);

	spdk_json_write_array_end(w);
}
2695 
/* Finish shutting down the accel modules, one per call.  g_accel_module acts as
 * the iteration cursor: each invocation advances to the next module and either
 * triggers its async module_fini (which is expected to call back into this
 * function when done) or recurses directly for modules without a fini hook.
 * Once the list is exhausted, framework-level resources are released and the
 * user's finish callback is invoked. */
void
spdk_accel_module_finish(void)
{
	if (!g_accel_module) {
		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
	} else {
		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
	}

	if (!g_accel_module) {
		/* All modules are done; tear down framework state. */
		spdk_spin_destroy(&g_keyring_spin);
		spdk_spin_destroy(&g_stats_lock);
		if (g_accel_domain) {
			spdk_memory_domain_destroy(g_accel_domain);
			g_accel_domain = NULL;
		}
		accel_module_finish_cb();
		return;
	}

	if (g_accel_module->module_fini) {
		/* Defer to a message so fini runs from the thread's message loop. */
		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
	} else {
		spdk_accel_module_finish();
	}
}
2722 
2723 static void
2724 accel_io_device_unregister_cb(void *io_device)
2725 {
2726 	struct spdk_accel_crypto_key *key, *key_tmp;
2727 	enum accel_opcode op;
2728 
2729 	spdk_spin_lock(&g_keyring_spin);
2730 	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
2731 		accel_crypto_key_destroy_unsafe(key);
2732 	}
2733 	spdk_spin_unlock(&g_keyring_spin);
2734 
2735 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
2736 		if (g_modules_opc_override[op] != NULL) {
2737 			free(g_modules_opc_override[op]);
2738 			g_modules_opc_override[op] = NULL;
2739 		}
2740 		g_modules_opc[op].module = NULL;
2741 	}
2742 
2743 	spdk_accel_module_finish();
2744 }
2745 
/* Begin framework shutdown: save the user's callback and unregister the accel
 * io_device.  The rest of the teardown continues asynchronously from
 * accel_io_device_unregister_cb() once all channels are released. */
void
spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
{
	assert(cb_fn != NULL);

	g_fini_cb_fn = cb_fn;
	g_fini_cb_arg = cb_arg;

	spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb);
}
2756 
2757 static struct spdk_accel_driver *
2758 accel_find_driver(const char *name)
2759 {
2760 	struct spdk_accel_driver *driver;
2761 
2762 	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
2763 		if (strcmp(driver->name, name) == 0) {
2764 			return driver;
2765 		}
2766 	}
2767 
2768 	return NULL;
2769 }
2770 
2771 int
2772 spdk_accel_set_driver(const char *name)
2773 {
2774 	struct spdk_accel_driver *driver;
2775 
2776 	driver = accel_find_driver(name);
2777 	if (driver == NULL) {
2778 		SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
2779 		return -ENODEV;
2780 	}
2781 
2782 	g_accel_driver = driver;
2783 
2784 	return 0;
2785 }
2786 
2787 void
2788 spdk_accel_driver_register(struct spdk_accel_driver *driver)
2789 {
2790 	if (accel_find_driver(driver->name)) {
2791 		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
2792 		assert(0);
2793 		return;
2794 	}
2795 
2796 	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
2797 }
2798 
/* Apply user-supplied accel options.  `opts->size` implements struct
 * versioning: only the first `size` bytes are copied, so options a (newer)
 * caller doesn't know about keep their current values.  A size larger than the
 * library's struct is rejected.
 * NOTE(review): the size field itself is within the copied region, so g_opts
 * inherits the caller's (possibly smaller) size — presumably benign; verify if
 * g_opts.size is ever consulted. */
int
spdk_accel_set_opts(const struct spdk_accel_opts *opts)
{
	if (opts->size > sizeof(*opts)) {
		return -EINVAL;
	}

	memcpy(&g_opts, opts, opts->size);

	return 0;
}
2810 
/* Copy the current accel options into the caller's struct.  Only as many bytes
 * as the caller declared via opts->size are written (struct versioning), and
 * the caller's size field is preserved. */
void
spdk_accel_get_opts(struct spdk_accel_opts *opts)
{
	size_t size = opts->size;

	assert(size <= sizeof(*opts));

	memcpy(opts, &g_opts, spdk_min(sizeof(*opts), size));
	opts->size = size;
}
2821 
/* Context for an asynchronous stats-gathering pass over all accel channels. */
struct accel_get_stats_ctx {
	struct accel_stats	stats;		/* accumulated totals across channels */
	accel_get_stats_cb	cb_fn;		/* user callback invoked when done */
	void			*cb_arg;	/* opaque argument for cb_fn */
};
2827 
/* Completion of the per-channel stats iteration: hand the accumulated totals to
 * the user's callback and release the context. */
static void
accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
{
	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);

	ctx->cb_fn(&ctx->stats, ctx->cb_arg);
	free(ctx);
}
2836 
/* Per-channel step of the stats iteration: fold this channel's counters into
 * the running totals, then move on to the next channel. */
static void
accel_get_channel_stats(struct spdk_io_channel_iter *iter)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);

	accel_add_stats(&ctx->stats, &accel_ch->stats);
	spdk_for_each_channel_continue(iter, 0);
}
2847 
/* Gather framework-wide stats asynchronously: seed the totals with the global
 * counters (accumulated from already-destroyed channels), then iterate every
 * live channel, invoking cb_fn(cb_arg) with the final totals.  Returns -ENOMEM
 * if the iteration context can't be allocated. */
int
accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg)
{
	struct accel_get_stats_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		return -ENOMEM;
	}

	/* Start from the global counters preserved from destroyed channels. */
	spdk_spin_lock(&g_stats_lock);
	accel_add_stats(&ctx->stats, &g_stats);
	spdk_spin_unlock(&g_stats_lock);

	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx,
			      accel_get_channel_stats_done);

	return 0;
}
2870 
/* Copy one opcode's counters from a channel into a caller-provided stats
 * struct.  `size` is the caller's sizeof(*stats); each field is written only if
 * it fits inside `size`, so older callers with a smaller struct stay safe
 * (struct-versioning pattern). */
void
spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum accel_opcode opcode,
			    struct spdk_accel_opcode_stats *stats, size_t size)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);

/* True when `field` lies entirely within the caller's declared struct size. */
#define FIELD_OK(field) \
	offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size

/* Assign `value` to stats->field only when the caller's struct includes it. */
#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		stats->field = value; \
	}

	SET_FIELD(executed, accel_ch->stats.operations[opcode].executed);
	SET_FIELD(failed, accel_ch->stats.operations[opcode].failed);
	SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes);

#undef FIELD_OK
#undef SET_FIELD
}
2892 
2893 SPDK_LOG_REGISTER_COMPONENT(accel)
2894