/* xref: /spdk/lib/accel/accel.c (revision 12fbe739a31b09aff0d05f354d4f3bbef99afc55) */
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2020 Intel Corporation.
 *   Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"

#include "accel_internal.h"

#include "spdk/dma.h"
#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/hexlify.h"

/* Accelerator Framework: The following provides a top level
 * generic API for the accelerator functions defined here. Modules,
 * such as the one in /module/accel/ioat, supply the implementation
 * with the exception of the pure software implementation contained
 * later in this file.
 */
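
/* Illustrative sketch (not part of the original file): a typical consumer
 * obtains an accel channel and submits an operation through the generic API.
 * copy_done() and my_ctx are hypothetical names; spdk_accel_get_io_channel()
 * is assumed to be the usual way of getting the channel.
 *
 *	struct spdk_io_channel *ch = spdk_accel_get_io_channel();
 *	int rc = spdk_accel_submit_copy(ch, dst, src, nbytes, 0, copy_done, my_ctx);
 *	if (rc == -ENOMEM) {
 *		// the per-channel task pool is exhausted; retry later
 *	}
 */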

#define ALIGN_4K			0x1000
#define MAX_TASKS_PER_CHANNEL		0x800
#define ACCEL_SMALL_CACHE_SIZE		128
#define ACCEL_LARGE_CACHE_SIZE		16
/* Set MSB, so we don't return NULL pointers as buffers */
#define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
#define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)
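
/* Note: buffers handed out by spdk_accel_get_buf() below are "virtual": the
 * user-visible pointer is ACCEL_BUFFER_BASE (plus any offset the user adds),
 * and the backing memory is only allocated from spdk_iobuf when a sequence
 * actually executes.  ACCEL_BUFFER_OFFSET_MASK recovers the offset from such
 * a pointer (see accel_update_virt_iov()).
 */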

#define ACCEL_CRYPTO_TWEAK_MODE_DEFAULT	SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA

struct accel_module {
	struct spdk_accel_module_if	*module;
	bool				supports_memory_domains;
};

/* Largest context size for all accel modules */
static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);

static struct spdk_accel_module_if *g_accel_module = NULL;
static spdk_accel_fini_cb g_fini_cb_fn = NULL;
static void *g_fini_cb_arg = NULL;
static bool g_modules_started = false;
static struct spdk_memory_domain *g_accel_domain;

/* Global list of registered accelerator modules */
static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);

/* Crypto keyring */
static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
static struct spdk_spinlock g_keyring_spin;

/* Global array mapping capabilities to modules */
static struct accel_module g_modules_opc[SPDK_ACCEL_OPC_LAST] = {};
static char *g_modules_opc_override[SPDK_ACCEL_OPC_LAST] = {};
TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
static struct spdk_accel_driver *g_accel_driver;
static struct spdk_accel_opts g_opts = {
	.small_cache_size = ACCEL_SMALL_CACHE_SIZE,
	.large_cache_size = ACCEL_LARGE_CACHE_SIZE,
	.task_count = MAX_TASKS_PER_CHANNEL,
	.sequence_count = MAX_TASKS_PER_CHANNEL,
	.buf_count = MAX_TASKS_PER_CHANNEL,
};
static struct accel_stats g_stats;
static struct spdk_spinlock g_stats_lock;

static const char *g_opcode_strings[SPDK_ACCEL_OPC_LAST] = {
	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
	"compress", "decompress", "encrypt", "decrypt", "xor"
};

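/* Sequence execution is driven by the state machine below (see
 * accel_process_sequence()).  The common path for each task is:
 * INIT -> CHECK_VIRTBUF -> CHECK_BOUNCEBUF -> [PULL_DATA] -> EXEC_TASK ->
 * COMPLETE_TASK -> [PUSH_DATA] -> NEXT_TASK; the AWAIT_* states are entered
 * whenever a step has to wait for a buffer or an asynchronous completion, and
 * the DRIVER_* states are used when a platform driver executes (part of) the
 * sequence itself.
 */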
enum accel_sequence_state {
	ACCEL_SEQUENCE_STATE_INIT,
	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_PULL_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
	ACCEL_SEQUENCE_STATE_EXEC_TASK,
	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
	ACCEL_SEQUENCE_STATE_NEXT_TASK,
	ACCEL_SEQUENCE_STATE_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_DRIVER_EXEC,
	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE,
	ACCEL_SEQUENCE_STATE_ERROR,
	ACCEL_SEQUENCE_STATE_MAX,
};

static const char *g_seq_states[]
__attribute__((unused)) = {
	[ACCEL_SEQUENCE_STATE_INIT] = "init",
	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC] = "driver-exec",
	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK] = "driver-await-task",
	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE] = "driver-complete",
	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
	[ACCEL_SEQUENCE_STATE_MAX] = "",
};

#define ACCEL_SEQUENCE_STATE_STRING(s) \
	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
	 ? g_seq_states[s] : "unknown")

struct accel_buffer {
	struct spdk_accel_sequence	*seq;
	void				*buf;
	uint64_t			len;
	struct spdk_iobuf_entry		iobuf;
	spdk_accel_sequence_get_buf_cb	cb_fn;
	void				*cb_ctx;
	TAILQ_ENTRY(accel_buffer)	link;
};

struct accel_io_channel {
	struct spdk_io_channel			*module_ch[SPDK_ACCEL_OPC_LAST];
	struct spdk_io_channel			*driver_channel;
	void					*task_pool_base;
	struct spdk_accel_sequence		*seq_pool_base;
	struct accel_buffer			*buf_pool_base;
	TAILQ_HEAD(, spdk_accel_task)		task_pool;
	TAILQ_HEAD(, spdk_accel_sequence)	seq_pool;
	TAILQ_HEAD(, accel_buffer)		buf_pool;
	struct spdk_iobuf_channel		iobuf;
	struct accel_stats			stats;
};

TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);

struct spdk_accel_sequence {
	struct accel_io_channel			*ch;
	struct accel_sequence_tasks		tasks;
	struct accel_sequence_tasks		completed;
	TAILQ_HEAD(, accel_buffer)		bounce_bufs;
	int					status;
	/* state uses enum accel_sequence_state */
	uint8_t					state;
	bool					in_process_sequence;
	spdk_accel_completion_cb		cb_fn;
	void					*cb_arg;
	TAILQ_ENTRY(spdk_accel_sequence)	link;
};

#define accel_update_stats(ch, event, v) \
	do { \
		(ch)->stats.event += (v); \
	} while (0)

#define accel_update_task_stats(ch, task, event, v) \
	accel_update_stats(ch, operations[(task)->op_code].event, v)

static inline void
accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
{
	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
	seq->state = state;
}

static void
accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
{
	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
	assert(status != 0);
	seq->status = status;
}

int
spdk_accel_get_opc_module_name(enum spdk_accel_opcode opcode, const char **module_name)
{
	if (opcode >= SPDK_ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	if (g_modules_opc[opcode].module) {
		*module_name = g_modules_opc[opcode].module->name;
	} else {
		return -ENOENT;
	}

	return 0;
}

void
_accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
{
	struct spdk_accel_module_if *accel_module;
	enum spdk_accel_opcode opcode;
	int j = 0;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (opcode = 0; opcode < SPDK_ACCEL_OPC_LAST; opcode++) {
			if (accel_module->supports_opcode(opcode)) {
				info->ops[j] = opcode;
				j++;
			}
		}
		info->name = accel_module->name;
		info->num_ops = j;
		fn(info);
		j = 0;
	}
}

int
_accel_get_opc_name(enum spdk_accel_opcode opcode, const char **opcode_name)
{
	int rc = 0;

	if (opcode < SPDK_ACCEL_OPC_LAST) {
		*opcode_name = g_opcode_strings[opcode];
	} else {
		/* invalid opcode */
		rc = -EINVAL;
	}

	return rc;
}

int
spdk_accel_assign_opc(enum spdk_accel_opcode opcode, const char *name)
{
	char *copy;

	if (g_modules_started == true) {
		/* we don't allow re-assignment once things have started */
		return -EINVAL;
	}

	if (opcode >= SPDK_ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	copy = strdup(name);
	if (copy == NULL) {
		return -ENOMEM;
	}

	/* module selection will be validated after the framework starts. */
	g_modules_opc_override[opcode] = copy;

	return 0;
}

void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
	struct accel_io_channel *accel_ch = accel_task->accel_ch;
	spdk_accel_completion_cb	cb_fn = accel_task->cb_fn;
	void				*cb_arg = accel_task->cb_arg;

	/* Return the task to the pool first: the user's callback (cb_fn) may
	 * recursively allocate another accel_task, and putting this one back
	 * before invoking it prevents the pool from being exhausted.
	 */
	TAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link);
	accel_task->seq = NULL;

	accel_update_task_stats(accel_ch, accel_task, executed, 1);
	accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes);
	if (spdk_unlikely(status != 0)) {
		accel_update_task_stats(accel_ch, accel_task, failed, 1);
	}

	cb_fn(cb_arg, status);
}

inline static struct spdk_accel_task *
_get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *accel_task;

	accel_task = TAILQ_FIRST(&accel_ch->task_pool);
	if (spdk_unlikely(accel_task == NULL)) {
		accel_update_stats(accel_ch, retry.task, 1);
		return NULL;
	}

	TAILQ_REMOVE(&accel_ch->task_pool, accel_task, link);
	accel_task->link.tqe_next = NULL;
	accel_task->link.tqe_prev = NULL;

	accel_task->cb_fn = cb_fn;
	accel_task->cb_arg = cb_arg;
	accel_task->accel_ch = accel_ch;
	accel_task->s.iovs = NULL;
	accel_task->d.iovs = NULL;

	return accel_task;
}

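/* Hand a task to the module registered for its opcode, using this channel's
 * per-opcode module channel.  Failed submissions are counted here so that the
 * individual submit paths don't have to.
 */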
static inline int
accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
{
	struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
	struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
	int rc;

	rc = module->submit_tasks(module_ch, task);
	if (spdk_unlikely(rc != 0)) {
		accel_update_task_stats(accel_ch, task, failed, 1);
	}

	return rc;
}

static inline uint64_t
accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
{
	uint64_t result = 0;
	uint32_t i;

	for (i = 0; i < iovcnt; ++i) {
		result += iovs[i].iov_len;
	}

	return result;
}

/* Accel framework public API for copy function */
int
spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
		       uint64_t nbytes, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY;
	accel_task->flags = flags;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for dual cast copy function */
int
spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
			   void *dst2, void *src, uint64_t nbytes, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST2];
	accel_task->d.iovs[0].iov_base = dst1;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->d2.iovs[0].iov_base = dst2;
	accel_task->d2.iovs[0].iov_len = nbytes;
	accel_task->d2.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_DUALCAST;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for compare function */
int
spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			  void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC2];
	accel_task->s.iovs[0].iov_base = src1;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->s2.iovs[0].iov_base = src2;
	accel_task->s2.iovs[0].iov_len = nbytes;
	accel_task->s2.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_COMPARE;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for fill function */
int
spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
		       uint8_t fill, uint64_t nbytes, int flags,
		       spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->nbytes = nbytes;
	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_FILL;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for CRC-32C function */
int
spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			 void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for chained CRC-32C function */
int
spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (iov == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iovcnt should not be zero\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	accel_task->s.iovs = iov;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->nbytes = accel_get_iovlen(iov, iov_cnt);
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for copy with CRC-32C function */
int
spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
			      int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for chained copy + CRC-32C function */
int
spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
			       uint32_t seed, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	uint64_t nbytes;

	if (src_iovs == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iovcnt should not be zero\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	nbytes = accel_get_iovlen(src_iovs, iov_cnt);
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->nbytes = nbytes;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->nbytes = nbytes;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_COMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
			     uint32_t *output_size, int flags, spdk_accel_completion_cb cb_fn,
			     void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->flags = flags;
	accel_task->op_code = SPDK_ACCEL_OPC_DECRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (spdk_unlikely(accel_task == NULL)) {
		return -ENOMEM;
	}

	accel_task->nsrcs.srcs = sources;
	accel_task->nsrcs.cnt = nsrcs;
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->nbytes = nbytes;
	accel_task->op_code = SPDK_ACCEL_OPC_XOR;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

static inline struct accel_buffer *
accel_get_buf(struct accel_io_channel *ch, uint64_t len)
{
	struct accel_buffer *buf;

	buf = TAILQ_FIRST(&ch->buf_pool);
	if (spdk_unlikely(buf == NULL)) {
		accel_update_stats(ch, retry.bufdesc, 1);
		return NULL;
	}

	TAILQ_REMOVE(&ch->buf_pool, buf, link);
	buf->len = len;
	buf->buf = NULL;
	buf->seq = NULL;
	buf->cb_fn = NULL;

	return buf;
}

static inline void
accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
{
	if (buf->buf != NULL) {
		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
	}

	TAILQ_INSERT_HEAD(&ch->buf_pool, buf, link);
}

static inline struct spdk_accel_sequence *
accel_sequence_get(struct accel_io_channel *ch)
{
	struct spdk_accel_sequence *seq;

	seq = TAILQ_FIRST(&ch->seq_pool);
	if (spdk_unlikely(seq == NULL)) {
		accel_update_stats(ch, retry.sequence, 1);
		return NULL;
	}

	TAILQ_REMOVE(&ch->seq_pool, seq, link);

	TAILQ_INIT(&seq->tasks);
	TAILQ_INIT(&seq->completed);
	TAILQ_INIT(&seq->bounce_bufs);

	seq->ch = ch;
	seq->status = 0;
	seq->state = ACCEL_SEQUENCE_STATE_INIT;
	seq->in_process_sequence = false;

	return seq;
}

static inline void
accel_sequence_put(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *ch = seq->ch;
	struct accel_buffer *buf;

	while (!TAILQ_EMPTY(&seq->bounce_bufs)) {
		buf = TAILQ_FIRST(&seq->bounce_bufs);
		TAILQ_REMOVE(&seq->bounce_bufs, buf, link);
		accel_put_buf(seq->ch, buf);
	}

	assert(TAILQ_EMPTY(&seq->tasks));
	assert(TAILQ_EMPTY(&seq->completed));
	seq->ch = NULL;

	TAILQ_INSERT_HEAD(&ch->seq_pool, seq, link);
}

static void accel_sequence_task_cb(void *cb_arg, int status);

static inline struct spdk_accel_task *
accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
			spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *task;

	task = _get_task(ch, accel_sequence_task_cb, seq);
	if (spdk_unlikely(task == NULL)) {
		return task;
	}

	task->step_cb_fn = cb_fn;
	task->step_cb_arg = cb_arg;
	task->seq = seq;

	return task;
}

int
spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
		       struct iovec *src_iovs, uint32_t src_iovcnt,
		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_COPY;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}
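
/* Illustrative sketch (not part of the original file): the append APIs build
 * up a sequence that is executed later as a whole.  seq_done(), my_ctx and
 * the iovecs are hypothetical, and spdk_accel_sequence_finish() (defined
 * elsewhere in accel) is assumed to be the call that submits the sequence.
 *
 *	struct spdk_accel_sequence *seq = NULL;
 *	spdk_accel_append_fill(&seq, ch, buf, len, NULL, NULL, 0xff, 0, NULL, NULL);
 *	spdk_accel_append_copy(&seq, ch, dst_iovs, 1, NULL, NULL, src_iovs, 1,
 *			       NULL, NULL, 0, NULL, NULL);
 *	spdk_accel_sequence_finish(seq, seq_done, my_ctx);
 */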

int
spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       void *buf, uint64_t len,
		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	memset(&task->fill_pattern, pattern, sizeof(uint64_t));

	task->d.iovs = &task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	task->d.iovs[0].iov_base = buf;
	task->d.iovs[0].iov_len = len;
	task->d.iovcnt = 1;
	task->nbytes = len;
	task->src_domain = NULL;
	task->dst_domain = domain;
	task->dst_domain_ctx = domain_ctx;
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_FILL;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			     struct iovec *dst_iovs, size_t dst_iovcnt,
			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *src_iovs, size_t src_iovcnt,
			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	/* TODO: support output_size for chaining */
	task->output_size = NULL;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			  struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->crypto_key = key;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->iv = iv;
	task->block_size = block_size;
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_ENCRYPT;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			  struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->crypto_key = key;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
	task->iv = iv;
	task->block_size = block_size;
	task->flags = flags;
	task->op_code = SPDK_ACCEL_OPC_DECRYPT;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			 uint32_t *dst, struct iovec *iovs, uint32_t iovcnt,
			 struct spdk_memory_domain *domain, void *domain_ctx,
			 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->s.iovs = iovs;
	task->s.iovcnt = iovcnt;
	task->src_domain = domain;
	task->src_domain_ctx = domain_ctx;
	task->nbytes = accel_get_iovlen(iovs, iovcnt);
	task->crc_dst = dst;
	task->seed = seed;
	task->op_code = SPDK_ACCEL_OPC_CRC32C;
	task->dst_domain = NULL;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
		   struct spdk_memory_domain **domain, void **domain_ctx)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_buffer *accel_buf;

	accel_buf = accel_get_buf(accel_ch, len);
	if (spdk_unlikely(accel_buf == NULL)) {
		return -ENOMEM;
	}

	/* We always return the same pointer and identify the buffers through domain_ctx */
	*buf = ACCEL_BUFFER_BASE;
	*domain_ctx = accel_buf;
	*domain = g_accel_domain;

	return 0;
}

void
spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
		   struct spdk_memory_domain *domain, void *domain_ctx)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_buffer *accel_buf = domain_ctx;

	assert(domain == g_accel_domain);
	assert(buf == ACCEL_BUFFER_BASE);

	accel_put_buf(accel_ch, accel_buf);
}

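/* Return all of a sequence's tasks, both completed and still pending, to the
 * channel's task pool, invoking each task's per-step callback, if any.
 */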
static void
accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
{
	struct spdk_accel_task *task;
	struct accel_io_channel *ch = seq->ch;
	spdk_accel_step_cb cb_fn;
	void *cb_arg;

	while (!TAILQ_EMPTY(&seq->completed)) {
		task = TAILQ_FIRST(&seq->completed);
		TAILQ_REMOVE(&seq->completed, task, seq_link);
		cb_fn = task->step_cb_fn;
		cb_arg = task->step_cb_arg;
		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
		if (cb_fn != NULL) {
			cb_fn(cb_arg);
		}
	}

	while (!TAILQ_EMPTY(&seq->tasks)) {
		task = TAILQ_FIRST(&seq->tasks);
		TAILQ_REMOVE(&seq->tasks, task, seq_link);
		cb_fn = task->step_cb_fn;
		cb_arg = task->step_cb_arg;
		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
		if (cb_fn != NULL) {
			cb_fn(cb_arg);
		}
	}
}

static void
accel_sequence_complete(struct spdk_accel_sequence *seq)
{
	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, seq->status);

	accel_update_stats(seq->ch, sequence_executed, 1);
	if (spdk_unlikely(seq->status != 0)) {
		accel_update_stats(seq->ch, sequence_failed, 1);
	}

	/* First notify all users who appended operations to this sequence */
	accel_sequence_complete_tasks(seq);

	/* Then notify the user who finished the sequence */
	seq->cb_fn(seq->cb_arg, seq->status);

	accel_sequence_put(seq);
}

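/* Translate an iovec pointing into a virtual accel buffer (see
 * ACCEL_BUFFER_BASE) into the real data buffer that was allocated for it.
 */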
static void
accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
{
	uintptr_t offset;

	offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
	assert(offset < accel_buf->len);

	diov->iov_base = (char *)accel_buf->buf + offset;
	diov->iov_len = siov->iov_len;
}

static void
accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
{
	struct spdk_accel_task *task;
	struct iovec *iov;

	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
	 * in a sequence that were using it.
	 */
	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
			iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
			assert(task->s.iovcnt == 1);
			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
			task->src_domain = NULL;
			task->s.iovs = iov;
		}
		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
			iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
			assert(task->d.iovcnt == 1);
			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
			task->dst_domain = NULL;
			task->d.iovs = iov;
		}
	}
}

static void accel_process_sequence(struct spdk_accel_sequence *seq);

static void
accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static bool
accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
			 spdk_iobuf_get_cb cb_fn)
{
	struct accel_io_channel *ch = seq->ch;

	assert(buf->buf == NULL);
	assert(buf->seq == NULL);

	buf->seq = seq;
	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
	if (buf->buf == NULL) {
		accel_update_stats(ch, retry.iobuf, 1);
		return false;
	}

	return true;
}

static bool
accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
	 * NULL */
	if (task->src_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
	}

	if (task->dst_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
	}

	return true;
}

static void
accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
}

bool
spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
			      struct spdk_memory_domain *domain, void *domain_ctx,
			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
{
	struct accel_buffer *accel_buf = domain_ctx;

	assert(domain == g_accel_domain);
	accel_buf->cb_fn = cb_fn;
	accel_buf->cb_ctx = cb_ctx;

	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
		return false;
	}

	accel_sequence_set_virtbuf(seq, accel_buf);

	return true;
}

struct spdk_accel_task *
spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
{
	return TAILQ_FIRST(&seq->tasks);
}

struct spdk_accel_task *
spdk_accel_sequence_next_task(struct spdk_accel_task *task)
{
	return TAILQ_NEXT(task, seq_link);
}

static inline void
accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
			struct accel_buffer *buf)
{
	bounce->orig_iovs = *iovs;
	bounce->orig_iovcnt = *iovcnt;
	bounce->orig_domain = *domain;
	bounce->orig_domain_ctx = *domain_ctx;
	bounce->iov.iov_base = buf->buf;
	bounce->iov.iov_len = buf->len;

	*iovs = &bounce->iov;
	*iovcnt = 1;
	*domain = NULL;
}

static void
accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
				&task->src_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static void
accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
				&task->dst_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

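/* When the module executing a task doesn't support memory domains, data owned
 * by a foreign domain is staged through local bounce buffers: they are
 * allocated here, filled via accel_task_pull_data() before execution and
 * written back via accel_task_push_data() afterwards.
 */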
static int
accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_buffer *buf;

	if (task->src_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->src_domain != g_accel_domain);

		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
		if (buf == NULL) {
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt,
					&task->src_domain, &task->src_domain_ctx, buf);
	}

	if (task->dst_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->dst_domain != g_accel_domain);

		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
		if (buf == NULL) {
			/* The src buffer will be released when a sequence is completed */
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt,
					&task->dst_domain, &task->dst_domain_ctx, buf);
	}

	return 0;
}

static void
accel_task_pull_data_cb(void *ctx, int status)
{
	struct spdk_accel_sequence *seq = ctx;

	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
	if (spdk_likely(status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
	} else {
		accel_sequence_set_fail(seq, status);
	}

	accel_process_sequence(seq);
}

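/* Copy the source data out of the caller's memory domain into the local
 * bounce buffer prepared by accel_sequence_check_bouncebuf().
 */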
static void
accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	assert(task->bounce.s.orig_iovs != NULL);
	assert(task->bounce.s.orig_domain != NULL);
	assert(task->bounce.s.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_pull_data(task->bounce.s.orig_domain,
					  task->bounce.s.orig_domain_ctx,
					  task->bounce.s.orig_iovs, task->bounce.s.orig_iovcnt,
					  task->s.iovs, task->s.iovcnt,
					  accel_task_pull_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->bounce.s.orig_domain), rc);
		accel_sequence_set_fail(seq, rc);
	}
}

static void
accel_task_push_data_cb(void *ctx, int status)
{
	struct spdk_accel_sequence *seq = ctx;

	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
	if (spdk_likely(status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
	} else {
		accel_sequence_set_fail(seq, status);
	}

	accel_process_sequence(seq);
}

static void
accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	assert(task->bounce.d.orig_iovs != NULL);
	assert(task->bounce.d.orig_domain != NULL);
	assert(task->bounce.d.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_push_data(task->bounce.d.orig_domain,
					  task->bounce.d.orig_domain_ctx,
					  task->bounce.d.orig_iovs, task->bounce.d.orig_iovcnt,
					  task->d.iovs, task->d.iovcnt,
					  accel_task_push_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->bounce.d.orig_domain), rc);
		accel_sequence_set_fail(seq, rc);
	}
}

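/* Advance a sequence through its state machine.  The loop runs until the
 * state stops changing, i.e. until the sequence either completes or has to
 * wait for an asynchronous event (buffer allocation, data movement, task
 * execution).  The in_process_sequence flag prevents recursion when a module
 * completes a task synchronously from within submit_tasks().
 */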
1583 static void
1584 accel_process_sequence(struct spdk_accel_sequence *seq)
1585 {
1586 	struct accel_io_channel *accel_ch = seq->ch;
1587 	struct spdk_accel_task *task;
1588 	enum accel_sequence_state state;
1589 	int rc;
1590 
1591 	/* Prevent recursive calls to this function */
1592 	if (spdk_unlikely(seq->in_process_sequence)) {
1593 		return;
1594 	}
1595 	seq->in_process_sequence = true;
1596 
1597 	task = TAILQ_FIRST(&seq->tasks);
1598 	do {
1599 		state = seq->state;
1600 		switch (state) {
1601 		case ACCEL_SEQUENCE_STATE_INIT:
1602 			if (g_accel_driver != NULL) {
1603 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC);
1604 				break;
1605 			}
1606 		/* Fall through */
1607 		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
1608 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1609 			if (!accel_sequence_check_virtbuf(seq, task)) {
1610 				/* We couldn't allocate a buffer, wait until one is available */
1611 				break;
1612 			}
1613 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1614 		/* Fall through */
1615 		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
1616 			/* If a module supports memory domains, we don't need to allocate bounce
1617 			 * buffers */
1618 			if (g_modules_opc[task->op_code].supports_memory_domains) {
1619 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1620 				break;
1621 			}
1622 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1623 			rc = accel_sequence_check_bouncebuf(seq, task);
1624 			if (spdk_unlikely(rc != 0)) {
1625 				/* We couldn't allocate a buffer, wait until one is available */
1626 				if (rc == -EAGAIN) {
1627 					break;
1628 				}
1629 				accel_sequence_set_fail(seq, rc);
1630 				break;
1631 			}
1632 			if (task->s.iovs == &task->bounce.s.iov) {
1633 				assert(task->bounce.s.orig_iovs);
1634 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
1635 				break;
1636 			}
1637 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1638 		/* Fall through */
1639 		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
1640 			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
1641 				      g_opcode_strings[task->op_code], seq);
1642 
1643 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
1644 			rc = accel_submit_task(accel_ch, task);
1645 			if (spdk_unlikely(rc != 0)) {
1646 				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
1647 					    g_opcode_strings[task->op_code], seq);
1648 				accel_sequence_set_fail(seq, rc);
1649 			}
1650 			break;
1651 		case ACCEL_SEQUENCE_STATE_PULL_DATA:
1652 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1653 			accel_task_pull_data(seq, task);
1654 			break;
1655 		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
1656 			if (task->d.iovs == &task->bounce.d.iov) {
1657 				assert(task->bounce.d.orig_iovs);
1658 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
1659 				break;
1660 			}
1661 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
1662 			break;
1663 		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
1664 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
1665 			accel_task_push_data(seq, task);
1666 			break;
1667 		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
1668 			TAILQ_REMOVE(&seq->tasks, task, seq_link);
1669 			TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
1670 			/* Check if there are any remaining tasks */
1671 			task = TAILQ_FIRST(&seq->tasks);
1672 			if (task == NULL) {
1673 				/* Immediately return here to make sure we don't touch the sequence
1674 				 * after it's completed */
1675 				accel_sequence_complete(seq);
1676 				return;
1677 			}
1678 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
1679 			break;
1680 		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC:
1681 			assert(!TAILQ_EMPTY(&seq->tasks));
1682 
1683 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK);
1684 			rc = g_accel_driver->execute_sequence(accel_ch->driver_channel, seq);
1685 			if (spdk_unlikely(rc != 0)) {
1686 				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
1687 					    seq, g_accel_driver->name);
1688 				accel_sequence_set_fail(seq, rc);
1689 			}
1690 			break;
1691 		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE:
1692 			/* Get the task again, as the driver might have completed some tasks
1693 			 * synchronously */
1694 			task = TAILQ_FIRST(&seq->tasks);
1695 			if (task == NULL) {
1696 				/* Immediately return here to make sure we don't touch the sequence
1697 				 * after it's completed */
1698 				accel_sequence_complete(seq);
1699 				return;
1700 			}
1701 			/* We don't want to execute the next task through the driver, so we
1702 			 * explicitly omit the INIT state here */
1703 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
1704 			break;
1705 		case ACCEL_SEQUENCE_STATE_ERROR:
1706 			/* Immediately return here to make sure we don't touch the sequence
1707 			 * after it's completed */
1708 			assert(seq->status != 0);
1709 			accel_sequence_complete(seq);
1710 			return;
1711 		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
1712 		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
1713 		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
1714 		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
1715 		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
1716 		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK:
1717 			break;
1718 		default:
1719 			assert(0 && "bad state");
1720 			break;
1721 		}
1722 	} while (seq->state != state);
1723 
1724 	seq->in_process_sequence = false;
1725 }
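
/* Summary of the per-task flow driven by the state machine above (bracketed steps
 * are skipped when not needed):
 *
 *	INIT -> CHECK_VIRTBUF -> CHECK_BOUNCEBUF [-> PULL_DATA] -> EXEC_TASK ->
 *	AWAIT_TASK -> COMPLETE_TASK [-> PUSH_DATA] -> NEXT_TASK -> next task or done
 *
 * When a driver is registered, the DRIVER_EXEC/DRIVER_AWAIT_TASK/DRIVER_COMPLETE
 * states are used in place of EXEC_TASK/AWAIT_TASK for the tasks it handles. */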
1726 
1727 static void
1728 accel_sequence_task_cb(void *cb_arg, int status)
1729 {
1730 	struct spdk_accel_sequence *seq = cb_arg;
1731 	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);
1732 	struct accel_io_channel *accel_ch = seq->ch;
1733 
1734 	/* spdk_accel_task_complete() puts the task back to the task pool, but we don't want to do
1735 	 * that if a task is part of a sequence.  Removing the task from that pool here is the
1736 	 * easiest way to prevent this, even though it is a bit hacky.
1737 	 */
1738 	assert(task != NULL);
1739 	TAILQ_REMOVE(&accel_ch->task_pool, task, link);
1740 
1741 	switch (seq->state) {
1742 	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
1743 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
1744 		if (spdk_unlikely(status != 0)) {
1745 			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
1746 				    g_opcode_strings[task->op_code], seq);
1747 			accel_sequence_set_fail(seq, status);
1748 		}
1749 
1750 		accel_process_sequence(seq);
1751 		break;
1752 	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK:
1753 		assert(g_accel_driver != NULL);
1754 		/* Immediately remove the task from the outstanding list to make sure the next call
1755 		 * to spdk_accel_sequence_first_task() doesn't return it */
1756 		TAILQ_REMOVE(&seq->tasks, task, seq_link);
1757 		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
1758 
1759 		if (spdk_unlikely(status != 0)) {
1760 			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
1761 				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
1762 				    g_accel_driver->name);
1763 			/* Update status without using accel_sequence_set_fail() to avoid changing
1764 			 * seq's state to ERROR until driver calls spdk_accel_sequence_continue() */
1765 			seq->status = status;
1766 		}
1767 		break;
1768 	default:
1769 		assert(0 && "bad state");
1770 		break;
1771 	}
1772 }
1773 
1774 void
1775 spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
1776 {
1777 	assert(g_accel_driver != NULL);
1778 	assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK);
1779 
1780 	if (spdk_likely(seq->status == 0)) {
1781 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE);
1782 	} else {
1783 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
1784 	}
1785 
1786 	accel_process_sequence(seq);
1787 }
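
/* Illustrative sketch (not part of the upstream sources) of how a driver's completion
 * path is expected to use this API, assuming a hypothetical driver that handles one
 * task per execute_sequence() call and a hypothetical struct my_driver_io holding
 * pointers to the task and its sequence:
 *
 *	static void
 *	my_driver_io_done(void *ctx, int status)
 *	{
 *		struct my_driver_io *io = ctx;
 *
 *		// Completing the task runs accel_sequence_task_cb(), which pops it off
 *		// the sequence; afterwards hand control back to the framework, which
 *		// moves the sequence to DRIVER_COMPLETE (or ERROR on failure).
 *		spdk_accel_task_complete(io->task, status);
 *		spdk_accel_sequence_continue(io->seq);
 *	}
 */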
1788 
1789 static bool
1790 accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
1791 {
1792 	/* For now, just do a simple check that the iovec arrays are exactly the same */
1793 	if (iovacnt != iovbcnt) {
1794 		return false;
1795 	}
1796 
1797 	return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
1798 }
1799 
1800 static bool
1801 accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next)
1802 {
1803 	struct spdk_accel_task *prev;
1804 
1805 	switch (task->op_code) {
1806 	case SPDK_ACCEL_OPC_DECOMPRESS:
1807 	case SPDK_ACCEL_OPC_FILL:
1808 	case SPDK_ACCEL_OPC_ENCRYPT:
1809 	case SPDK_ACCEL_OPC_DECRYPT:
1810 		if (task->dst_domain != next->src_domain) {
1811 			return false;
1812 		}
1813 		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
1814 					next->s.iovs, next->s.iovcnt)) {
1815 			return false;
1816 		}
1817 		task->d.iovs = next->d.iovs;
1818 		task->d.iovcnt = next->d.iovcnt;
1819 		task->dst_domain = next->dst_domain;
1820 		task->dst_domain_ctx = next->dst_domain_ctx;
1821 		break;
1822 	case SPDK_ACCEL_OPC_CRC32C:
1823 		/* crc32 is special, because it doesn't have a dst buffer */
1824 		if (task->src_domain != next->src_domain) {
1825 			return false;
1826 		}
1827 		if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt,
1828 					next->s.iovs, next->s.iovcnt)) {
1829 			return false;
1830 		}
1831 		/* We can only change crc32's buffer if we can also change the previous task's buffer */
1832 		prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link);
1833 		if (prev == NULL) {
1834 			return false;
1835 		}
1836 		if (!accel_task_set_dstbuf(prev, next)) {
1837 			return false;
1838 		}
1839 		task->s.iovs = next->d.iovs;
1840 		task->s.iovcnt = next->d.iovcnt;
1841 		task->src_domain = next->dst_domain;
1842 		task->src_domain_ctx = next->dst_domain_ctx;
1843 		break;
1844 	default:
1845 		return false;
1846 	}
1847 
1848 	return true;
1849 }
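
/* Example of the rewrite accel_task_set_dstbuf() enables (buffer names are
 * hypothetical): given the sequence
 *
 *	decompress: src -> tmp
 *	copy:       tmp -> dst
 *
 * pointing the decompress task's output directly at dst lets the copy be retired
 * without ever executing:
 *
 *	decompress: src -> dst
 */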
1850 
1851 static void
1852 accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
1853 			   struct spdk_accel_task **next_task)
1854 {
1855 	struct spdk_accel_task *next = *next_task;
1856 
1857 	switch (task->op_code) {
1858 	case SPDK_ACCEL_OPC_COPY:
1859 		/* We only allow changing src of operations that actually have a src, e.g. we never
1860 		 * do it for fill.  Theoretically, it is possible, but we'd have to be careful to
1861 		 * change the src of the operation after fill (which in turn could also be a fill).
1862 		 * So, for the sake of simplicity, skip this type of operation for now.
1863 		 */
1864 		if (next->op_code != SPDK_ACCEL_OPC_DECOMPRESS &&
1865 		    next->op_code != SPDK_ACCEL_OPC_COPY &&
1866 		    next->op_code != SPDK_ACCEL_OPC_ENCRYPT &&
1867 		    next->op_code != SPDK_ACCEL_OPC_DECRYPT &&
1868 		    next->op_code != SPDK_ACCEL_OPC_CRC32C) {
1869 			break;
1870 		}
1871 		if (task->dst_domain != next->src_domain) {
1872 			break;
1873 		}
1874 		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
1875 					next->s.iovs, next->s.iovcnt)) {
1876 			break;
1877 		}
1878 		next->s.iovs = task->s.iovs;
1879 		next->s.iovcnt = task->s.iovcnt;
1880 		next->src_domain = task->src_domain;
1881 		next->src_domain_ctx = task->src_domain_ctx;
1882 		TAILQ_REMOVE(&seq->tasks, task, seq_link);
1883 		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
1884 		break;
1885 	case SPDK_ACCEL_OPC_DECOMPRESS:
1886 	case SPDK_ACCEL_OPC_FILL:
1887 	case SPDK_ACCEL_OPC_ENCRYPT:
1888 	case SPDK_ACCEL_OPC_DECRYPT:
1889 	case SPDK_ACCEL_OPC_CRC32C:
1890 		/* We can only merge tasks when one of them is a copy */
1891 		if (next->op_code != SPDK_ACCEL_OPC_COPY) {
1892 			break;
1893 		}
1894 		if (!accel_task_set_dstbuf(task, next)) {
1895 			break;
1896 		}
1897 		/* We're removing next_task from the tasks queue, so we need to update its pointer,
1898 		 * so that the TAILQ_FOREACH_SAFE() loop below works correctly */
1899 		*next_task = TAILQ_NEXT(next, seq_link);
1900 		TAILQ_REMOVE(&seq->tasks, next, seq_link);
1901 		TAILQ_INSERT_TAIL(&seq->completed, next, seq_link);
1902 		break;
1903 	default:
1904 		assert(0 && "bad opcode");
1905 		break;
1906 	}
1907 }
1908 
1909 void
1910 spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
1911 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
1912 {
1913 	struct spdk_accel_task *task, *next;
1914 
1915 	/* Try to remove any copy operations if possible */
1916 	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
1917 		if (next == NULL) {
1918 			break;
1919 		}
1920 		accel_sequence_merge_tasks(seq, task, &next);
1921 	}
1922 
1923 	seq->cb_fn = cb_fn;
1924 	seq->cb_arg = cb_arg;
1925 
1926 	accel_process_sequence(seq);
1927 }
1928 
1929 void
1930 spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
1931 {
1932 	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
1933 	struct spdk_accel_task *task;
1934 
1935 	assert(TAILQ_EMPTY(&seq->completed));
1936 	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);
1937 
1938 	while (!TAILQ_EMPTY(&tasks)) {
1939 		task = TAILQ_FIRST(&tasks);
1940 		TAILQ_REMOVE(&tasks, task, seq_link);
1941 		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
1942 	}
1943 }
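
/* E.g. a sequence A -> B -> C becomes C -> B -> A.  This is useful for consumers
 * that record operations in one direction but need to execute them in the opposite
 * order (e.g. reversing a write-path sequence for the read path). */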
1944 
1945 void
1946 spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
1947 {
1948 	if (seq == NULL) {
1949 		return;
1950 	}
1951 
1952 	accel_sequence_complete_tasks(seq);
1953 	accel_sequence_put(seq);
1954 }
1955 
1956 struct spdk_memory_domain *
1957 spdk_accel_get_memory_domain(void)
1958 {
1959 	return g_accel_domain;
1960 }
1961 
1962 static struct spdk_accel_module_if *
1963 _module_find_by_name(const char *name)
1964 {
1965 	struct spdk_accel_module_if *accel_module = NULL;
1966 
1967 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
1968 		if (strcmp(name, accel_module->name) == 0) {
1969 			break;
1970 		}
1971 	}
1972 
1973 	return accel_module;
1974 }
1975 
1976 static inline struct spdk_accel_crypto_key *
1977 _accel_crypto_key_get(const char *name)
1978 {
1979 	struct spdk_accel_crypto_key *key;
1980 
1981 	assert(spdk_spin_held(&g_keyring_spin));
1982 
1983 	TAILQ_FOREACH(key, &g_keyring, link) {
1984 		if (strcmp(name, key->param.key_name) == 0) {
1985 			return key;
1986 		}
1987 	}
1988 
1989 	return NULL;
1990 }
1991 
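/* Key material is scrubbed with spdk_memset_s() (rather than plain memset()) so the
 * compiler cannot optimize the wipe away before the buffers are freed. */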
1992 static void
1993 accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
1994 {
1995 	if (key->param.hex_key) {
1996 		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
1997 		free(key->param.hex_key);
1998 	}
1999 	if (key->param.hex_key2) {
2000 		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
2001 		free(key->param.hex_key2);
2002 	}
2003 	free(key->param.tweak_mode);
2004 	free(key->param.key_name);
2005 	free(key->param.cipher);
2006 	if (key->key) {
2007 		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
2008 		free(key->key);
2009 	}
2010 	if (key->key2) {
2011 		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
2012 		free(key->key2);
2013 	}
2014 	free(key);
2015 }
2016 
2017 static void
2018 accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
2019 {
2020 	assert(key->module_if);
2021 	assert(key->module_if->crypto_key_deinit);
2022 
2023 	key->module_if->crypto_key_deinit(key);
2024 	accel_crypto_key_free_mem(key);
2025 }
2026 
2027 /*
2028  * This function mitigates a timing side channel which could be caused by using strcmp()
2029  * Please refer to the chapter "Mitigating Information Leakage Based on Variable Timing" in
2030  * the article [1] for more details.
2031  * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
2032  */
2033 static bool
2034 accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
2035 {
2036 	size_t i;
2037 	volatile size_t x = k1_len ^ k2_len;
2038 
2039 	for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
2040 		x |= k1[i] ^ k2[i];
2041 	}
2042 
2043 	return x == 0;
2044 }
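
/* Note that the bitwise '&' in the loop condition and the volatile accumulator are
 * deliberate: they avoid data-dependent branches, so the comparison takes the same
 * amount of time regardless of where (or whether) the key bytes differ. */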
2045 
2046 static const char *g_tweak_modes[] = {
2047 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA] = "SIMPLE_LBA",
2048 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_JOIN_NEG_LBA_WITH_LBA] = "JOIN_NEG_LBA_WITH_LBA",
2049 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_FULL_LBA] = "INCR_512_FULL_LBA",
2050 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_UPPER_LBA] = "INCR_512_UPPER_LBA",
2051 };
2052 
2053 static const char *g_ciphers[] = {
2054 	[SPDK_ACCEL_CIPHER_AES_CBC] = "AES_CBC",
2055 	[SPDK_ACCEL_CIPHER_AES_XTS] = "AES_XTS",
2056 };
2057 
2058 int
2059 spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
2060 {
2061 	struct spdk_accel_module_if *module;
2062 	struct spdk_accel_crypto_key *key;
2063 	size_t hex_key_size, hex_key2_size;
2064 	bool found = false;
2065 	size_t i;
2066 	int rc;
2067 
2068 	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
2069 		return -EINVAL;
2070 	}
2071 
2072 	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
2073 		/* This should be practically impossible, but check and warn the user */
2074 		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
2075 	}
2076 	module = g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module;
2077 
2078 	if (!module) {
2079 		SPDK_ERRLOG("No accel module found assigned for crypto operation\n");
2080 		return -ENOENT;
2081 	}
2082 
2083 	if (!module->crypto_key_init || !module->crypto_supports_cipher) {
2084 		SPDK_ERRLOG("Module %s doesn't support crypto operations\n", module->name);
2085 		return -ENOTSUP;
2086 	}
2087 
2088 	key = calloc(1, sizeof(*key));
2089 	if (!key) {
2090 		return -ENOMEM;
2091 	}
2092 
2093 	key->param.key_name = strdup(param->key_name);
2094 	if (!key->param.key_name) {
2095 		rc = -ENOMEM;
2096 		goto error;
2097 	}
2098 
2099 	for (i = 0; i < SPDK_COUNTOF(g_ciphers); ++i) {
2100 		assert(g_ciphers[i]);
2101 
2102 		if (strcmp(param->cipher, g_ciphers[i]) == 0) {
2103 			key->cipher = i;
2104 			found = true;
2105 			break;
2106 		}
2107 	}
2108 
2109 	if (!found) {
2110 		SPDK_ERRLOG("Failed to parse cipher\n");
2111 		rc = -EINVAL;
2112 		goto error;
2113 	}
2114 
2115 	key->param.cipher = strdup(param->cipher);
2116 	if (!key->param.cipher) {
2117 		rc = -ENOMEM;
2118 		goto error;
2119 	}
2120 
2121 	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2122 	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
2123 		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2124 		rc = -EINVAL;
2125 		goto error;
2126 	}
2127 
2128 	if (hex_key_size == 0) {
2129 		SPDK_ERRLOG("key1 size cannot be 0\n");
2130 		rc = -EINVAL;
2131 		goto error;
2132 	}
2133 
2134 	key->param.hex_key = strdup(param->hex_key);
2135 	if (!key->param.hex_key) {
2136 		rc = -ENOMEM;
2137 		goto error;
2138 	}
2139 
2140 	key->key_size = hex_key_size / 2;
2141 	key->key = spdk_unhexlify(key->param.hex_key);
2142 	if (!key->key) {
2143 		SPDK_ERRLOG("Failed to unhexlify key1\n");
2144 		rc = -EINVAL;
2145 		goto error;
2146 	}
2147 
2148 	if (param->hex_key2) {
2149 		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2150 		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
2151 			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2152 			rc = -EINVAL;
2153 			goto error;
2154 		}
2155 
2156 		if (hex_key2_size == 0) {
2157 			SPDK_ERRLOG("key2 size cannot be 0\n");
2158 			rc = -EINVAL;
2159 			goto error;
2160 		}
2161 
2162 		key->param.hex_key2 = strdup(param->hex_key2);
2163 		if (!key->param.hex_key2) {
2164 			rc = -ENOMEM;
2165 			goto error;
2166 		}
2167 
2168 		key->key2_size = hex_key2_size / 2;
2169 		key->key2 = spdk_unhexlify(key->param.hex_key2);
2170 		if (!key->key2) {
2171 			SPDK_ERRLOG("Failed to unhexlify key2\n");
2172 			rc = -EINVAL;
2173 			goto error;
2174 		}
2175 	}
2176 
2177 	key->tweak_mode = ACCEL_CRYPTO_TWEAK_MODE_DEFAULT;
2178 	if (param->tweak_mode) {
2179 		found = false;
2180 
2181 		key->param.tweak_mode = strdup(param->tweak_mode);
2182 		if (!key->param.tweak_mode) {
2183 			rc = -ENOMEM;
2184 			goto error;
2185 		}
2186 
2187 		for (i = 0; i < SPDK_COUNTOF(g_tweak_modes); ++i) {
2188 			assert(g_tweak_modes[i]);
2189 
2190 			if (strcmp(param->tweak_mode, g_tweak_modes[i]) == 0) {
2191 				key->tweak_mode = i;
2192 				found = true;
2193 				break;
2194 			}
2195 		}
2196 
2197 		if (!found) {
2198 			SPDK_ERRLOG("Failed to parse tweak mode\n");
2199 			rc = -EINVAL;
2200 			goto error;
2201 		}
2202 	}
2203 
2204 	if ((!module->crypto_supports_tweak_mode && key->tweak_mode != ACCEL_CRYPTO_TWEAK_MODE_DEFAULT) ||
2205 	    (module->crypto_supports_tweak_mode && !module->crypto_supports_tweak_mode(key->tweak_mode))) {
2206 		SPDK_ERRLOG("Module %s doesn't support %s tweak mode\n", module->name,
2207 			    g_tweak_modes[key->tweak_mode]);
2208 		rc = -EINVAL;
2209 		goto error;
2210 	}
2211 
2212 	if (!module->crypto_supports_cipher(key->cipher, key->key_size)) {
2213 		SPDK_ERRLOG("Module %s doesn't support %s cipher with %zu key size\n", module->name,
2214 			    g_ciphers[key->cipher], key->key_size);
2215 		rc = -EINVAL;
2216 		goto error;
2217 	}
2218 
2219 	if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) {
2220 		if (!key->key2) {
2221 			SPDK_ERRLOG("%s key2 is missing\n", g_ciphers[key->cipher]);
2222 			rc = -EINVAL;
2223 			goto error;
2224 		}
2225 
2226 		if (key->key_size != key->key2_size) {
2227 			SPDK_ERRLOG("%s key size %zu is not equal to key2 size %zu\n", g_ciphers[key->cipher],
2228 				    key->key_size,
2229 				    key->key2_size);
2230 			rc = -EINVAL;
2231 			goto error;
2232 		}
2233 
2234 		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
2235 			SPDK_ERRLOG("%s identical keys are not secure\n", g_ciphers[key->cipher]);
2236 			rc = -EINVAL;
2237 			goto error;
2238 		}
2239 	}
2240 
2241 	if (key->cipher == SPDK_ACCEL_CIPHER_AES_CBC) {
2242 		if (key->key2_size) {
2243 			SPDK_ERRLOG("%s doesn't use key2\n", g_ciphers[key->cipher]);
2244 			rc = -EINVAL;
2245 			goto error;
2246 		}
2247 	}
2248 
2249 	key->module_if = module;
2250 
2251 	spdk_spin_lock(&g_keyring_spin);
2252 	if (_accel_crypto_key_get(param->key_name)) {
2253 		rc = -EEXIST;
2254 	} else {
2255 		rc = module->crypto_key_init(key);
2256 		if (rc) {
2257 			SPDK_ERRLOG("Module %s failed to initialize crypto key\n", module->name);
2258 		} else {
2259 			TAILQ_INSERT_TAIL(&g_keyring, key, link);
2260 		}
2261 	}
2262 	spdk_spin_unlock(&g_keyring_spin);
2263 
2264 	if (rc) {
2265 		goto error;
2266 	}
2267 
2268 	return 0;
2269 
2270 error:
2271 	accel_crypto_key_free_mem(key);
2272 	return rc;
2273 }
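
/* Usage sketch (illustrative only; the hex strings are placeholders, not real keys,
 * and the supported key sizes depend on the module assigned to ENCRYPT/DECRYPT):
 *
 *	struct spdk_accel_crypto_key_create_param param = {
 *		.cipher = "AES_XTS",
 *		.hex_key = "00112233445566778899aabbccddeeff",
 *		.hex_key2 = "ffeeddccbbaa99887766554433221100",
 *		.key_name = "key0",
 *		.tweak_mode = "SIMPLE_LBA",	// optional, defaults to SIMPLE_LBA
 *	};
 *
 *	if (spdk_accel_crypto_key_create(&param) != 0) {
 *		// handle the error
 *	}
 */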
2274 
2275 int
2276 spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
2277 {
2278 	if (!key || !key->module_if) {
2279 		return -EINVAL;
2280 	}
2281 
2282 	spdk_spin_lock(&g_keyring_spin);
2283 	if (!_accel_crypto_key_get(key->param.key_name)) {
2284 		spdk_spin_unlock(&g_keyring_spin);
2285 		return -ENOENT;
2286 	}
2287 	TAILQ_REMOVE(&g_keyring, key, link);
2288 	spdk_spin_unlock(&g_keyring_spin);
2289 
2290 	accel_crypto_key_destroy_unsafe(key);
2291 
2292 	return 0;
2293 }
2294 
2295 struct spdk_accel_crypto_key *
2296 spdk_accel_crypto_key_get(const char *name)
2297 {
2298 	struct spdk_accel_crypto_key *key;
2299 
2300 	spdk_spin_lock(&g_keyring_spin);
2301 	key = _accel_crypto_key_get(name);
2302 	spdk_spin_unlock(&g_keyring_spin);
2303 
2304 	return key;
2305 }
2306 
2307 /* Helper function when accel modules register with the framework. */
2308 void
2309 spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
2310 {
2311 	struct spdk_accel_module_if *tmp;
2312 
2313 	if (_module_find_by_name(accel_module->name)) {
2314 		SPDK_NOTICELOG("Module %s already registered\n", accel_module->name);
2315 		assert(false);
2316 		return;
2317 	}
2318 
2319 	TAILQ_FOREACH(tmp, &spdk_accel_module_list, tailq) {
2320 		if (accel_module->priority < tmp->priority) {
2321 			break;
2322 		}
2323 	}
2324 
2325 	if (tmp != NULL) {
2326 		TAILQ_INSERT_BEFORE(tmp, accel_module, tailq);
2327 	} else {
2328 		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
2329 	}
2330 }
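
/* Modules normally reach this function through the SPDK_ACCEL_MODULE_REGISTER()
 * constructor macro in spdk/accel_module.h rather than calling it directly.  The
 * list is kept sorted by ascending priority, so higher-priority modules land later
 * in the list and win the opcode assignment performed in spdk_accel_initialize(). */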
2331 
2332 /* Framework level channel create callback. */
2333 static int
2334 accel_create_channel(void *io_device, void *ctx_buf)
2335 {
2336 	struct accel_io_channel	*accel_ch = ctx_buf;
2337 	struct spdk_accel_task *accel_task;
2338 	struct spdk_accel_sequence *seq;
2339 	struct accel_buffer *buf;
2340 	uint8_t *task_mem;
2341 	uint32_t i = 0, j;
2342 	int rc;
2343 
2344 	accel_ch->task_pool_base = calloc(g_opts.task_count, g_max_accel_module_size);
2345 	if (accel_ch->task_pool_base == NULL) {
2346 		return -ENOMEM;
2347 	}
2348 
2349 	accel_ch->seq_pool_base = calloc(g_opts.sequence_count, sizeof(struct spdk_accel_sequence));
2350 	if (accel_ch->seq_pool_base == NULL) {
2351 		goto err;
2352 	}
2353 
2354 	accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer));
2355 	if (accel_ch->buf_pool_base == NULL) {
2356 		goto err;
2357 	}
2358 
2359 	TAILQ_INIT(&accel_ch->task_pool);
2360 	TAILQ_INIT(&accel_ch->seq_pool);
2361 	TAILQ_INIT(&accel_ch->buf_pool);
2362 
2363 	task_mem = accel_ch->task_pool_base;
2364 	for (i = 0; i < g_opts.task_count; i++) {
2365 		accel_task = (struct spdk_accel_task *)task_mem;
2366 		TAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
2367 		task_mem += g_max_accel_module_size;
2368 	}
2369 	for (i = 0; i < g_opts.sequence_count; i++) {
2370 		seq = &accel_ch->seq_pool_base[i];
2371 		TAILQ_INSERT_TAIL(&accel_ch->seq_pool, seq, link);
2372 	}
2373 	for (i = 0; i < g_opts.buf_count; i++) {
2374 		buf = &accel_ch->buf_pool_base[i];
2375 		TAILQ_INSERT_TAIL(&accel_ch->buf_pool, buf, link);
2376 	}
2377 
2378 	/* Assign modules and get IO channels for each */
2379 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
2380 		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
2381 		/* This can happen if idxd runs out of channels. */
2382 		if (accel_ch->module_ch[i] == NULL) {
2383 			SPDK_ERRLOG("Module %s failed to get io channel\n", g_modules_opc[i].module->name);
2384 			goto err;
2385 		}
2386 	}
2387 
2388 	if (g_accel_driver != NULL) {
2389 		accel_ch->driver_channel = g_accel_driver->get_io_channel();
2390 		if (accel_ch->driver_channel == NULL) {
2391 			SPDK_ERRLOG("Failed to get driver's IO channel\n");
2392 			goto err;
2393 		}
2394 	}
2395 
2396 	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size,
2397 				     g_opts.large_cache_size);
2398 	if (rc != 0) {
2399 		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
2400 		goto err;
2401 	}
2402 
2403 	return 0;
2404 err:
2405 	if (accel_ch->driver_channel != NULL) {
2406 		spdk_put_io_channel(accel_ch->driver_channel);
2407 	}
2408 	for (j = 0; j < i; j++) {
2409 		spdk_put_io_channel(accel_ch->module_ch[j]);
2410 	}
2411 	free(accel_ch->task_pool_base);
2412 	free(accel_ch->seq_pool_base);
2413 	free(accel_ch->buf_pool_base);
2414 
2415 	return -ENOMEM;
2416 }
2417 
2418 static void
2419 accel_add_stats(struct accel_stats *total, struct accel_stats *stats)
2420 {
2421 	int i;
2422 
2423 	total->sequence_executed += stats->sequence_executed;
2424 	total->sequence_failed += stats->sequence_failed;
2425 	total->retry.task += stats->retry.task;
2426 	total->retry.sequence += stats->retry.sequence;
2427 	total->retry.iobuf += stats->retry.iobuf;
2428 	total->retry.bufdesc += stats->retry.bufdesc;
2429 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; ++i) {
2430 		total->operations[i].executed += stats->operations[i].executed;
2431 		total->operations[i].failed += stats->operations[i].failed;
2432 		total->operations[i].num_bytes += stats->operations[i].num_bytes;
2433 	}
2434 }
2435 
2436 /* Framework level channel destroy callback. */
2437 static void
2438 accel_destroy_channel(void *io_device, void *ctx_buf)
2439 {
2440 	struct accel_io_channel	*accel_ch = ctx_buf;
2441 	int i;
2442 
2443 	spdk_iobuf_channel_fini(&accel_ch->iobuf);
2444 
2445 	if (accel_ch->driver_channel != NULL) {
2446 		spdk_put_io_channel(accel_ch->driver_channel);
2447 	}
2448 
2449 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
2450 		assert(accel_ch->module_ch[i] != NULL);
2451 		spdk_put_io_channel(accel_ch->module_ch[i]);
2452 		accel_ch->module_ch[i] = NULL;
2453 	}
2454 
2455 	/* Fold this channel's stats into the global stats so they aren't lost once the channel is gone */
2456 	spdk_spin_lock(&g_stats_lock);
2457 	accel_add_stats(&g_stats, &accel_ch->stats);
2458 	spdk_spin_unlock(&g_stats_lock);
2459 
2460 	free(accel_ch->task_pool_base);
2461 	free(accel_ch->seq_pool_base);
2462 	free(accel_ch->buf_pool_base);
2463 }
2464 
2465 struct spdk_io_channel *
2466 spdk_accel_get_io_channel(void)
2467 {
2468 	return spdk_get_io_channel(&spdk_accel_module_list);
2469 }
2470 
2471 static int
2472 accel_module_initialize(void)
2473 {
2474 	struct spdk_accel_module_if *accel_module, *tmp_module;
2475 	int rc = 0, module_rc;
2476 
2477 	TAILQ_FOREACH_SAFE(accel_module, &spdk_accel_module_list, tailq, tmp_module) {
2478 		module_rc = accel_module->module_init();
2479 		if (module_rc) {
2480 			SPDK_ERRLOG("Module %s initialization failed with %d\n", accel_module->name, module_rc);
2481 			TAILQ_REMOVE(&spdk_accel_module_list, accel_module, tailq);
2482 			if (!rc) {
2483 				rc = module_rc;
2484 			}
2485 		} else {
2486 			SPDK_DEBUGLOG(accel, "Module %s initialized.\n", accel_module->name);
2487 		}
2488 	}
2489 
2490 	return rc;
2491 }
2492 
2493 static void
2494 accel_module_init_opcode(enum spdk_accel_opcode opcode)
2495 {
2496 	struct accel_module *module = &g_modules_opc[opcode];
2497 	struct spdk_accel_module_if *module_if = module->module;
2498 
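	/* Passing (NULL, 0) queries the number of supported memory domains without
	 * filling an array; any positive count means the module can consume
	 * memory-domain buffers itself, letting the framework skip bounce buffers. */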
2499 	if (module_if->get_memory_domains != NULL) {
2500 		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
2501 	}
2502 }
2503 
2504 int
2505 spdk_accel_initialize(void)
2506 {
2507 	enum spdk_accel_opcode op;
2508 	struct spdk_accel_module_if *accel_module = NULL;
2509 	int rc;
2510 
2511 	/*
2512 	 * We need a unique identifier for the accel framework, so use the
2513 	 * spdk_accel_module_list address for this purpose.
2514 	 */
2515 	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
2516 				sizeof(struct accel_io_channel), "accel");
2517 
2518 	spdk_spin_init(&g_keyring_spin);
2519 	spdk_spin_init(&g_stats_lock);
2520 
2521 	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
2522 				       "SPDK_ACCEL_DMA_DEVICE");
2523 	if (rc != 0) {
2524 		SPDK_ERRLOG("Failed to create accel memory domain\n");
2525 		return rc;
2526 	}
2527 
2528 	g_modules_started = true;
2529 	rc = accel_module_initialize();
2530 	if (rc) {
2531 		return rc;
2532 	}
2533 
2534 	/* The module list is ordered by priority, with the highest priority modules being at the end
2535 	 * of the list.  The software module should be somewhere at the beginning of the list,
2536 	 * before all HW modules.
2537 	 * NOTE: all opcodes must be supported by software in the event that no HW modules are
2538 	 * initialized to support the operation.
2539 	 */
2540 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2541 		for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
2542 			if (accel_module->supports_opcode(op)) {
2543 				g_modules_opc[op].module = accel_module;
2544 				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
2545 			}
2546 		}
2547 
2548 		if (accel_module->get_ctx_size != NULL) {
2549 			g_max_accel_module_size = spdk_max(g_max_accel_module_size,
2550 							   accel_module->get_ctx_size());
2551 		}
2552 	}
2553 
2554 	/* Now let's check for overrides and apply any that exist */
2555 	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
2556 		if (g_modules_opc_override[op] != NULL) {
2557 			accel_module = _module_find_by_name(g_modules_opc_override[op]);
2558 			if (accel_module == NULL) {
2559 				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
2560 				return -EINVAL;
2561 			}
2562 			if (accel_module->supports_opcode(op) == false) {
2563 				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
2564 				return -EINVAL;
2565 			}
2566 			g_modules_opc[op].module = accel_module;
2567 		}
2568 	}
2569 
2570 	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
2571 		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations");
2572 		return -EINVAL;
2573 	}
2574 
2575 	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
2576 		assert(g_modules_opc[op].module != NULL);
2577 		accel_module_init_opcode(op);
2578 	}
2579 
2580 	rc = spdk_iobuf_register_module("accel");
2581 	if (rc != 0) {
2582 		SPDK_ERRLOG("Failed to register accel iobuf module\n");
2583 		return rc;
2584 	}
2585 
2586 	return 0;
2587 }
2588 
2589 static void
2590 accel_module_finish_cb(void)
2591 {
2592 	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;
2593 
2594 	cb_fn(g_fini_cb_arg);
2595 	g_fini_cb_fn = NULL;
2596 	g_fini_cb_arg = NULL;
2597 }
2598 
2599 static void
2600 accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
2601 			   const char *module_str)
2602 {
2603 	spdk_json_write_object_begin(w);
2604 	spdk_json_write_named_string(w, "method", "accel_assign_opc");
2605 	spdk_json_write_named_object_begin(w, "params");
2606 	spdk_json_write_named_string(w, "opname", opc_str);
2607 	spdk_json_write_named_string(w, "module", module_str);
2608 	spdk_json_write_object_end(w);
2609 	spdk_json_write_object_end(w);
2610 }
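
/* The emitted JSON entry looks roughly like the following (module name illustrative):
 *
 *	{
 *		"method": "accel_assign_opc",
 *		"params": { "opname": "crc32c", "module": "dsa" }
 *	}
 */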
2611 
2612 static void
2613 __accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
2614 {
2615 	spdk_json_write_named_string(w, "name", key->param.key_name);
2616 	spdk_json_write_named_string(w, "cipher", key->param.cipher);
2617 	spdk_json_write_named_string(w, "key", key->param.hex_key);
2618 	if (key->param.hex_key2) {
2619 		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
2620 	}
2621 
2622 	if (key->param.tweak_mode) {
2623 		spdk_json_write_named_string(w, "tweak_mode", key->param.tweak_mode);
2624 	}
2625 }
2626 
2627 void
2628 _accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
2629 {
2630 	spdk_json_write_object_begin(w);
2631 	__accel_crypto_key_dump_param(w, key);
2632 	spdk_json_write_object_end(w);
2633 }
2634 
2635 static void
2636 _accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
2637 				    struct spdk_accel_crypto_key *key)
2638 {
2639 	spdk_json_write_object_begin(w);
2640 	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
2641 	spdk_json_write_named_object_begin(w, "params");
2642 	__accel_crypto_key_dump_param(w, key);
2643 	spdk_json_write_object_end(w);
2644 	spdk_json_write_object_end(w);
2645 }
2646 
2647 static void
2648 accel_write_options(struct spdk_json_write_ctx *w)
2649 {
2650 	spdk_json_write_object_begin(w);
2651 	spdk_json_write_named_string(w, "method", "accel_set_options");
2652 	spdk_json_write_named_object_begin(w, "params");
2653 	spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size);
2654 	spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size);
2655 	spdk_json_write_named_uint32(w, "task_count", g_opts.task_count);
2656 	spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count);
2657 	spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count);
2658 	spdk_json_write_object_end(w);
2659 	spdk_json_write_object_end(w);
2660 }
2661 
2662 static void
2663 _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
2664 {
2665 	struct spdk_accel_crypto_key *key;
2666 
2667 	spdk_spin_lock(&g_keyring_spin);
2668 	TAILQ_FOREACH(key, &g_keyring, link) {
2669 		if (full_dump) {
2670 			_accel_crypto_key_write_config_json(w, key);
2671 		} else {
2672 			_accel_crypto_key_dump_param(w, key);
2673 		}
2674 	}
2675 	spdk_spin_unlock(&g_keyring_spin);
2676 }
2677 
2678 void
2679 _accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
2680 {
2681 	_accel_crypto_keys_write_config_json(w, false);
2682 }
2683 
2684 void
2685 spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
2686 {
2687 	struct spdk_accel_module_if *accel_module;
2688 	int i;
2689 
2690 	spdk_json_write_array_begin(w);
2691 	accel_write_options(w);
2692 
2693 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2694 		if (accel_module->write_config_json) {
2695 			accel_module->write_config_json(w);
2696 		}
2697 	}
2698 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
2699 		if (g_modules_opc_override[i]) {
2700 			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
2701 		}
2702 	}
2703 
2704 	_accel_crypto_keys_write_config_json(w, true);
2705 
2706 	spdk_json_write_array_end(w);
2707 }
2708 
2709 void
2710 spdk_accel_module_finish(void)
2711 {
2712 	if (!g_accel_module) {
2713 		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
2714 	} else {
2715 		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
2716 	}
2717 
2718 	if (!g_accel_module) {
2719 		spdk_spin_destroy(&g_keyring_spin);
2720 		spdk_spin_destroy(&g_stats_lock);
2721 		if (g_accel_domain) {
2722 			spdk_memory_domain_destroy(g_accel_domain);
2723 			g_accel_domain = NULL;
2724 		}
2725 		accel_module_finish_cb();
2726 		return;
2727 	}
2728 
2729 	if (g_accel_module->module_fini) {
2730 		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
2731 	} else {
2732 		spdk_accel_module_finish();
2733 	}
2734 }
2735 
2736 static void
2737 accel_io_device_unregister_cb(void *io_device)
2738 {
2739 	struct spdk_accel_crypto_key *key, *key_tmp;
2740 	enum spdk_accel_opcode op;
2741 
2742 	spdk_spin_lock(&g_keyring_spin);
2743 	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
2744 		accel_crypto_key_destroy_unsafe(key);
2745 	}
2746 	spdk_spin_unlock(&g_keyring_spin);
2747 
2748 	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
2749 		if (g_modules_opc_override[op] != NULL) {
2750 			free(g_modules_opc_override[op]);
2751 			g_modules_opc_override[op] = NULL;
2752 		}
2753 		g_modules_opc[op].module = NULL;
2754 	}
2755 
2756 	spdk_accel_module_finish();
2757 }
2758 
2759 void
2760 spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
2761 {
2762 	assert(cb_fn != NULL);
2763 
2764 	g_fini_cb_fn = cb_fn;
2765 	g_fini_cb_arg = cb_arg;
2766 
2767 	spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb);
2768 }
2769 
2770 static struct spdk_accel_driver *
2771 accel_find_driver(const char *name)
2772 {
2773 	struct spdk_accel_driver *driver;
2774 
2775 	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
2776 		if (strcmp(driver->name, name) == 0) {
2777 			return driver;
2778 		}
2779 	}
2780 
2781 	return NULL;
2782 }
2783 
2784 int
2785 spdk_accel_set_driver(const char *name)
2786 {
2787 	struct spdk_accel_driver *driver;
2788 
2789 	driver = accel_find_driver(name);
2790 	if (driver == NULL) {
2791 		SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
2792 		return -ENODEV;
2793 	}
2794 
2795 	g_accel_driver = driver;
2796 
2797 	return 0;
2798 }
2799 
2800 void
2801 spdk_accel_driver_register(struct spdk_accel_driver *driver)
2802 {
2803 	if (accel_find_driver(driver->name)) {
2804 		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
2805 		assert(0);
2806 		return;
2807 	}
2808 
2809 	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
2810 }
2811 
2812 int
2813 spdk_accel_set_opts(const struct spdk_accel_opts *opts)
2814 {
2815 	if (opts->size > sizeof(*opts)) {
2816 		return -EINVAL;
2817 	}
2818 
2819 	memcpy(&g_opts, opts, opts->size);
2820 
2821 	return 0;
2822 }
2823 
2824 void
2825 spdk_accel_get_opts(struct spdk_accel_opts *opts)
2826 {
2827 	size_t size = opts->size;
2828 
2829 	assert(size <= sizeof(*opts));
2830 
2831 	memcpy(opts, &g_opts, spdk_min(sizeof(*opts), size));
2832 	opts->size = size;
2833 }
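
/* Callers must set opts->size before calling either function; that is what keeps the
 * struct forward/backward compatible across library versions.  A sketch (the values
 * are consumed when channels are created, so set them before the first channel):
 *
 *	struct spdk_accel_opts opts = { .size = sizeof(opts) };
 *
 *	spdk_accel_get_opts(&opts);
 *	opts.task_count = 4096;	// enlarge the per-channel task pool
 *	spdk_accel_set_opts(&opts);
 */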
2834 
2835 struct accel_get_stats_ctx {
2836 	struct accel_stats	stats;
2837 	accel_get_stats_cb	cb_fn;
2838 	void			*cb_arg;
2839 };
2840 
2841 static void
2842 accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
2843 {
2844 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
2845 
2846 	ctx->cb_fn(&ctx->stats, ctx->cb_arg);
2847 	free(ctx);
2848 }
2849 
2850 static void
2851 accel_get_channel_stats(struct spdk_io_channel_iter *iter)
2852 {
2853 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
2854 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
2855 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
2856 
2857 	accel_add_stats(&ctx->stats, &accel_ch->stats);
2858 	spdk_for_each_channel_continue(iter, 0);
2859 }
2860 
2861 int
2862 accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg)
2863 {
2864 	struct accel_get_stats_ctx *ctx;
2865 
2866 	ctx = calloc(1, sizeof(*ctx));
2867 	if (ctx == NULL) {
2868 		return -ENOMEM;
2869 	}
2870 
2871 	spdk_spin_lock(&g_stats_lock);
2872 	accel_add_stats(&ctx->stats, &g_stats);
2873 	spdk_spin_unlock(&g_stats_lock);
2874 
2875 	ctx->cb_fn = cb_fn;
2876 	ctx->cb_arg = cb_arg;
2877 
2878 	spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx,
2879 			      accel_get_channel_stats_done);
2880 
2881 	return 0;
2882 }
2883 
2884 void
2885 spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum spdk_accel_opcode opcode,
2886 			    struct spdk_accel_opcode_stats *stats, size_t size)
2887 {
2888 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
2889 
2890 #define FIELD_OK(field) \
2891 	offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size
2892 
2893 #define SET_FIELD(field, value) \
2894 	if (FIELD_OK(field)) { \
2895 		stats->field = value; \
2896 	}
2897 
2898 	SET_FIELD(executed, accel_ch->stats.operations[opcode].executed);
2899 	SET_FIELD(failed, accel_ch->stats.operations[opcode].failed);
2900 	SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes);
2901 
2902 #undef FIELD_OK
2903 #undef SET_FIELD
2904 }
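
/* The size parameter keeps this ABI-stable: fields beyond the caller-provided size
 * are simply not written.  Typical call:
 *
 *	struct spdk_accel_opcode_stats stats = {};
 *
 *	spdk_accel_get_opcode_stats(ch, SPDK_ACCEL_OPC_CRC32C, &stats, sizeof(stats));
 */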
2905 
2906 uint8_t
2907 spdk_accel_get_buf_align(enum spdk_accel_opcode opcode,
2908 			 const struct spdk_accel_operation_exec_ctx *ctx)
2909 {
2910 	struct spdk_accel_module_if *module = g_modules_opc[opcode].module;
2911 	struct spdk_accel_opcode_info modinfo = {}, drvinfo = {};
2912 
2913 	if (g_accel_driver != NULL && g_accel_driver->get_operation_info != NULL) {
2914 		g_accel_driver->get_operation_info(opcode, ctx, &drvinfo);
2915 	}
2916 
2917 	if (module->get_operation_info != NULL) {
2918 		module->get_operation_info(opcode, ctx, &modinfo);
2919 	}
2920 
2921 	/* If a driver is set, it'll execute most of the operations, while the rest will usually
2922 	 * fall back to accel_sw, which doesn't have any alignment requirements.  However, to be
2923 	 * extra safe, return the max(driver, module) if a driver delegates some operations to a
2924 	 * hardware module. */
2925 	return spdk_max(modinfo.required_alignment, drvinfo.required_alignment);
2926 }
2927 
2928 struct spdk_accel_module_if *
2929 spdk_accel_get_module(const char *name)
2930 {
2931 	struct spdk_accel_module_if *module;
2932 
2933 	TAILQ_FOREACH(module, &spdk_accel_module_list, tailq) {
2934 		if (strcmp(module->name, name) == 0) {
2935 			return module;
2936 		}
2937 	}
2938 
2939 	return NULL;
2940 }
2941 
2942 SPDK_LOG_REGISTER_COMPONENT(accel)
2943