xref: /spdk/lib/accel/accel.c (revision cfa0a248e28dc42bd51b24c4d4ab64e0b5dd7854)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2020 Intel Corporation.
3  *   Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
4  *   All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk/accel_module.h"
10 
11 #include "accel_internal.h"
12 
13 #include "spdk/dma.h"
14 #include "spdk/env.h"
15 #include "spdk/likely.h"
16 #include "spdk/log.h"
17 #include "spdk/thread.h"
18 #include "spdk/json.h"
19 #include "spdk/crc32.h"
20 #include "spdk/util.h"
21 #include "spdk/hexlify.h"
22 #include "spdk/string.h"
23 
24 /* Accelerator Framework: This file provides the top-level, generic
25  * API for the accelerator operations declared here. Modules, such as
26  * the one in /module/accel/ioat, supply the implementations, with the
27  * exception of the pure software implementation contained later in
28  * this file.
29  */
30 
31 #define ALIGN_4K			0x1000
32 #define ACCEL_TASKS_PER_CHANNEL		2048
33 #define ACCEL_SMALL_CACHE_SIZE		128
34 #define ACCEL_LARGE_CACHE_SIZE		16
35 /* Set MSB, so we don't return NULL pointers as buffers */
36 #define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
37 #define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)
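
/* Example: a virtual accel buffer address such as ACCEL_BUFFER_BASE + 0x20
 * encodes offset 0x20 into the backing allocation; the offset is recovered
 * with (uintptr_t)ptr & ACCEL_BUFFER_OFFSET_MASK (see accel_update_virt_iov()
 * below).
 */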
38 
39 #define ACCEL_CRYPTO_TWEAK_MODE_DEFAULT	SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA
40 #define ACCEL_TASKS_IN_SEQUENCE_LIMIT	8
41 
42 struct accel_module {
43 	struct spdk_accel_module_if	*module;
44 	bool				supports_memory_domains;
45 };
46 
47 /* Largest context size for all accel modules */
48 static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);
49 
50 static struct spdk_accel_module_if *g_accel_module = NULL;
51 static spdk_accel_fini_cb g_fini_cb_fn = NULL;
52 static void *g_fini_cb_arg = NULL;
53 static bool g_modules_started = false;
54 static struct spdk_memory_domain *g_accel_domain;
55 
56 /* Global list of registered accelerator modules */
57 static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
58 	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);
59 
60 /* Crypto keyring */
61 static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
62 static struct spdk_spinlock g_keyring_spin;
63 
64 /* Global array mapping capabilities to modules */
65 static struct accel_module g_modules_opc[SPDK_ACCEL_OPC_LAST] = {};
66 static char *g_modules_opc_override[SPDK_ACCEL_OPC_LAST] = {};
67 TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
68 static struct spdk_accel_driver *g_accel_driver;
69 static struct spdk_accel_opts g_opts = {
70 	.small_cache_size = ACCEL_SMALL_CACHE_SIZE,
71 	.large_cache_size = ACCEL_LARGE_CACHE_SIZE,
72 	.task_count = ACCEL_TASKS_PER_CHANNEL,
73 	.sequence_count = ACCEL_TASKS_PER_CHANNEL,
74 	.buf_count = ACCEL_TASKS_PER_CHANNEL,
75 };
76 static struct accel_stats g_stats;
77 static struct spdk_spinlock g_stats_lock;
78 
79 static const char *g_opcode_strings[SPDK_ACCEL_OPC_LAST] = {
80 	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
81 	"compress", "decompress", "encrypt", "decrypt", "xor",
82 	"dif_verify", "dif_verify_copy", "dif_generate", "dif_generate_copy",
83 	"dix_generate", "dix_verify"
84 };
85 
86 enum accel_sequence_state {
87 	ACCEL_SEQUENCE_STATE_INIT,
88 	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
89 	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
90 	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
91 	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
92 	ACCEL_SEQUENCE_STATE_PULL_DATA,
93 	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
94 	ACCEL_SEQUENCE_STATE_EXEC_TASK,
95 	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
96 	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
97 	ACCEL_SEQUENCE_STATE_NEXT_TASK,
98 	ACCEL_SEQUENCE_STATE_PUSH_DATA,
99 	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
100 	ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS,
101 	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS,
102 	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS,
103 	ACCEL_SEQUENCE_STATE_ERROR,
104 	ACCEL_SEQUENCE_STATE_MAX,
105 };
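
/* Roughly, the happy path for a sequence is INIT -> CHECK_VIRTBUF ->
 * CHECK_BOUNCEBUF -> PULL_DATA -> EXEC_TASK -> AWAIT_TASK -> COMPLETE_TASK ->
 * NEXT_TASK for each task, finishing with PUSH_DATA when bounce buffers were
 * used.  Each AWAIT_* state parks the sequence while an asynchronous step
 * (buffer allocation, data movement, task execution) is in flight, and the
 * DRIVER_* states are used when a platform driver executes the tasks instead.
 */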
106 
107 static const char *g_seq_states[]
108 __attribute__((unused)) = {
109 	[ACCEL_SEQUENCE_STATE_INIT] = "init",
110 	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
111 	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
112 	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
113 	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
114 	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
115 	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
116 	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
117 	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
118 	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
119 	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
120 	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
121 	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
122 	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS] = "driver-exec-tasks",
123 	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS] = "driver-await-tasks",
124 	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS] = "driver-complete-tasks",
125 	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
126 	[ACCEL_SEQUENCE_STATE_MAX] = "",
127 };
128 
129 #define ACCEL_SEQUENCE_STATE_STRING(s) \
130 	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
131 	 ? g_seq_states[s] : "unknown")
132 
133 struct accel_buffer {
134 	struct spdk_accel_sequence	*seq;
135 	void				*buf;
136 	uint64_t			len;
137 	struct spdk_iobuf_entry		iobuf;
138 	spdk_accel_sequence_get_buf_cb	cb_fn;
139 	void				*cb_ctx;
140 	SLIST_ENTRY(accel_buffer)	link;
141 	struct accel_io_channel		*ch;
142 };
143 
144 struct accel_io_channel {
145 	struct spdk_io_channel			*module_ch[SPDK_ACCEL_OPC_LAST];
146 	struct spdk_io_channel			*driver_channel;
147 	void					*task_pool_base;
148 	struct spdk_accel_sequence		*seq_pool_base;
149 	struct accel_buffer			*buf_pool_base;
150 	struct spdk_accel_task_aux_data		*task_aux_data_base;
151 	STAILQ_HEAD(, spdk_accel_task)		task_pool;
152 	SLIST_HEAD(, spdk_accel_task_aux_data)	task_aux_data_pool;
153 	SLIST_HEAD(, spdk_accel_sequence)	seq_pool;
154 	SLIST_HEAD(, accel_buffer)		buf_pool;
155 	struct spdk_iobuf_channel		iobuf;
156 	struct accel_stats			stats;
157 };
158 
159 TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);
160 
161 struct spdk_accel_sequence {
162 	struct accel_io_channel			*ch;
163 	struct accel_sequence_tasks		tasks;
164 	SLIST_HEAD(, accel_buffer)		bounce_bufs;
165 	int					status;
166 	/* state uses enum accel_sequence_state */
167 	uint8_t					state;
168 	bool					in_process_sequence;
169 	spdk_accel_completion_cb		cb_fn;
170 	void					*cb_arg;
171 	SLIST_ENTRY(spdk_accel_sequence)	link;
172 };
173 SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_sequence) == 64, "invalid size");
174 
175 #define accel_update_stats(ch, event, v) \
176 	do { \
177 		(ch)->stats.event += (v); \
178 	} while (0)
179 
180 #define accel_update_task_stats(ch, task, event, v) \
181 	accel_update_stats(ch, operations[(task)->op_code].event, v)
182 
183 static inline void accel_sequence_task_cb(void *cb_arg, int status);
184 
185 static inline void
186 accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
187 {
188 	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
189 		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
190 	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
191 	seq->state = state;
192 }
193 
194 static void
195 accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
196 {
197 	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
198 	assert(status != 0);
199 	seq->status = status;
200 }
201 
202 int
203 spdk_accel_get_opc_module_name(enum spdk_accel_opcode opcode, const char **module_name)
204 {
205 	if (opcode >= SPDK_ACCEL_OPC_LAST) {
206 		/* invalid opcode */
207 		return -EINVAL;
208 	}
209 
210 	if (g_modules_opc[opcode].module) {
211 		*module_name = g_modules_opc[opcode].module->name;
212 	} else {
213 		return -ENOENT;
214 	}
215 
216 	return 0;
217 }
218 
219 void
220 _accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
221 {
222 	struct spdk_accel_module_if *accel_module;
223 	enum spdk_accel_opcode opcode;
224 	int j = 0;
225 
226 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
227 		for (opcode = 0; opcode < SPDK_ACCEL_OPC_LAST; opcode++) {
228 			if (accel_module->supports_opcode(opcode)) {
229 				info->ops[j] = opcode;
230 				j++;
231 			}
232 		}
233 		info->name = accel_module->name;
234 		info->num_ops = j;
235 		fn(info);
236 		j = 0;
237 	}
238 }
239 
240 const char *
241 spdk_accel_get_opcode_name(enum spdk_accel_opcode opcode)
242 {
243 	if (opcode < SPDK_ACCEL_OPC_LAST) {
244 		return g_opcode_strings[opcode];
245 	}
246 
247 	return NULL;
248 }
249 
250 int
251 spdk_accel_assign_opc(enum spdk_accel_opcode opcode, const char *name)
252 {
253 	char *copy;
254 
255 	if (g_modules_started == true) {
256 		/* we don't allow re-assignment once things have started */
257 		return -EINVAL;
258 	}
259 
260 	if (opcode >= SPDK_ACCEL_OPC_LAST) {
261 		/* invalid opcode */
262 		return -EINVAL;
263 	}
264 
265 	copy = strdup(name);
266 	if (copy == NULL) {
267 		return -ENOMEM;
268 	}
269 
270 	/* module selection will be validated after the framework starts. */
271 	free(g_modules_opc_override[opcode]);
272 	g_modules_opc_override[opcode] = copy;
273 
274 	return 0;
275 }
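
/* Usage sketch: pin an opcode to a module by name before the framework starts
 * (assuming the software module is registered as "software"):
 *
 *	int rc = spdk_accel_assign_opc(SPDK_ACCEL_OPC_CRC32C, "software");
 *	if (rc != 0) {
 *		// -EINVAL after startup or for a bad opcode, -ENOMEM if strdup() fails
 *	}
 */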
276 
277 inline static struct spdk_accel_task *
278 _get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
279 {
280 	struct spdk_accel_task *accel_task;
281 
282 	accel_task = STAILQ_FIRST(&accel_ch->task_pool);
283 	if (spdk_unlikely(accel_task == NULL)) {
284 		accel_update_stats(accel_ch, retry.task, 1);
285 		return NULL;
286 	}
287 
288 	accel_update_stats(accel_ch, task_outstanding, 1);
289 	STAILQ_REMOVE_HEAD(&accel_ch->task_pool, link);
290 	accel_task->link.stqe_next = NULL;
291 
292 	accel_task->cb_fn = cb_fn;
293 	accel_task->cb_arg = cb_arg;
294 	accel_task->accel_ch = accel_ch;
295 	accel_task->s.iovs = NULL;
296 	accel_task->d.iovs = NULL;
297 
298 	return accel_task;
299 }
300 
301 static void
302 _put_task(struct accel_io_channel *ch, struct spdk_accel_task *task)
303 {
304 	STAILQ_INSERT_HEAD(&ch->task_pool, task, link);
305 	accel_update_stats(ch, task_outstanding, -1);
306 }
307 
308 void
309 spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
310 {
311 	struct accel_io_channel		*accel_ch = accel_task->accel_ch;
312 	spdk_accel_completion_cb	cb_fn;
313 	void				*cb_arg;
314 
315 	accel_update_task_stats(accel_ch, accel_task, executed, 1);
316 	accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes);
317 	if (spdk_unlikely(status != 0)) {
318 		accel_update_task_stats(accel_ch, accel_task, failed, 1);
319 	}
320 
321 	if (accel_task->seq) {
322 		accel_sequence_task_cb(accel_task->seq, status);
323 		return;
324 	}
325 
326 	cb_fn = accel_task->cb_fn;
327 	cb_arg = accel_task->cb_arg;
328 
329 	if (accel_task->has_aux) {
330 		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task->aux, link);
331 		accel_task->aux = NULL;
332 		accel_task->has_aux = false;
333 	}
334 
335 	/* Put the accel_task back into the pool before invoking the callback to
336 	 * avoid exhausting the pool if the user's callback (cb_fn) recursively
337 	 * allocates another accel_task.
338 	 */
339 	_put_task(accel_ch, accel_task);
340 
341 	cb_fn(cb_arg, status);
342 }
343 
344 static inline int
345 accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
346 {
347 	struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
348 	struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
349 	int rc;
350 
351 	rc = module->submit_tasks(module_ch, task);
352 	if (spdk_unlikely(rc != 0)) {
353 		accel_update_task_stats(accel_ch, task, failed, 1);
354 	}
355 
356 	return rc;
357 }
358 
359 static inline uint64_t
360 accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
361 {
362 	uint64_t result = 0;
363 	uint32_t i;
364 
365 	for (i = 0; i < iovcnt; ++i) {
366 		result += iovs[i].iov_len;
367 	}
368 
369 	return result;
370 }
371 
372 #define ACCEL_TASK_ALLOC_AUX_BUF(task)						\
373 do {										\
374         (task)->aux = SLIST_FIRST(&(task)->accel_ch->task_aux_data_pool);	\
375         if (spdk_unlikely(!(task)->aux)) {					\
376                 SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");	\
377                 _put_task(task->accel_ch, task);				\
378                 assert(0);							\
379                 return -ENOMEM;							\
380         }									\
381         SLIST_REMOVE_HEAD(&(task)->accel_ch->task_aux_data_pool, link);		\
382         (task)->has_aux = true;							\
383 } while (0)
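
/* Note: on failure this macro logs, recycles the task and returns -ENOMEM from
 * the *enclosing* function, which is why it is a macro rather than a helper.
 */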
384 
385 /* Accel framework public API for copy function */
386 int
387 spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
388 		       uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
389 {
390 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
391 	struct spdk_accel_task *accel_task;
392 
393 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
394 	if (spdk_unlikely(accel_task == NULL)) {
395 		return -ENOMEM;
396 	}
397 
398 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
399 
400 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
401 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
402 	accel_task->d.iovs[0].iov_base = dst;
403 	accel_task->d.iovs[0].iov_len = nbytes;
404 	accel_task->d.iovcnt = 1;
405 	accel_task->s.iovs[0].iov_base = src;
406 	accel_task->s.iovs[0].iov_len = nbytes;
407 	accel_task->s.iovcnt = 1;
408 	accel_task->nbytes = nbytes;
409 	accel_task->op_code = SPDK_ACCEL_OPC_COPY;
410 	accel_task->src_domain = NULL;
411 	accel_task->dst_domain = NULL;
412 
413 	return accel_submit_task(accel_ch, accel_task);
414 }
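
/* Usage sketch (hypothetical callback and buffers):
 *
 *	static void copy_done(void *cb_arg, int status)
 *	{
 *		if (status != 0) {
 *			SPDK_ERRLOG("copy failed: %d\n", status);
 *		}
 *	}
 *
 *	rc = spdk_accel_submit_copy(ch, dst, src, 4096, copy_done, NULL);
 *	if (rc == -ENOMEM) {
 *		// task pool exhausted, resubmit later
 *	}
 */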
415 
416 /* Accel framework public API for dual cast copy function */
417 int
418 spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
419 			   void *dst2, void *src, uint64_t nbytes,
420 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
421 {
422 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
423 	struct spdk_accel_task *accel_task;
424 
425 	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
426 		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
427 		return -EINVAL;
428 	}
429 
430 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
431 	if (spdk_unlikely(accel_task == NULL)) {
432 		return -ENOMEM;
433 	}
434 
435 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
436 
437 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
438 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
439 	accel_task->d2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST2];
440 	accel_task->d.iovs[0].iov_base = dst1;
441 	accel_task->d.iovs[0].iov_len = nbytes;
442 	accel_task->d.iovcnt = 1;
443 	accel_task->d2.iovs[0].iov_base = dst2;
444 	accel_task->d2.iovs[0].iov_len = nbytes;
445 	accel_task->d2.iovcnt = 1;
446 	accel_task->s.iovs[0].iov_base = src;
447 	accel_task->s.iovs[0].iov_len = nbytes;
448 	accel_task->s.iovcnt = 1;
449 	accel_task->nbytes = nbytes;
450 	accel_task->op_code = SPDK_ACCEL_OPC_DUALCAST;
451 	accel_task->src_domain = NULL;
452 	accel_task->dst_domain = NULL;
453 
454 	return accel_submit_task(accel_ch, accel_task);
455 }
456 
457 /* Accel framework public API for compare function */
458 
459 int
460 spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
461 			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
462 			  void *cb_arg)
463 {
464 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
465 	struct spdk_accel_task *accel_task;
466 
467 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
468 	if (spdk_unlikely(accel_task == NULL)) {
469 		return -ENOMEM;
470 	}
471 
472 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
473 
474 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
475 	accel_task->s2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC2];
476 	accel_task->s.iovs[0].iov_base = src1;
477 	accel_task->s.iovs[0].iov_len = nbytes;
478 	accel_task->s.iovcnt = 1;
479 	accel_task->s2.iovs[0].iov_base = src2;
480 	accel_task->s2.iovs[0].iov_len = nbytes;
481 	accel_task->s2.iovcnt = 1;
482 	accel_task->nbytes = nbytes;
483 	accel_task->op_code = SPDK_ACCEL_OPC_COMPARE;
484 	accel_task->src_domain = NULL;
485 	accel_task->dst_domain = NULL;
486 
487 	return accel_submit_task(accel_ch, accel_task);
488 }
489 
490 /* Accel framework public API for fill function */
491 int
492 spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
493 		       uint8_t fill, uint64_t nbytes,
494 		       spdk_accel_completion_cb cb_fn, void *cb_arg)
495 {
496 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
497 	struct spdk_accel_task *accel_task;
498 
499 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
500 	if (spdk_unlikely(accel_task == NULL)) {
501 		return -ENOMEM;
502 	}
503 
504 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
505 
506 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
507 	accel_task->d.iovs[0].iov_base = dst;
508 	accel_task->d.iovs[0].iov_len = nbytes;
509 	accel_task->d.iovcnt = 1;
510 	accel_task->nbytes = nbytes;
511 	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
512 	accel_task->op_code = SPDK_ACCEL_OPC_FILL;
513 	accel_task->src_domain = NULL;
514 	accel_task->dst_domain = NULL;
515 
516 	return accel_submit_task(accel_ch, accel_task);
517 }
518 
519 /* Accel framework public API for CRC-32C function */
520 int
521 spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
522 			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
523 			 void *cb_arg)
524 {
525 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
526 	struct spdk_accel_task *accel_task;
527 
528 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
529 	if (spdk_unlikely(accel_task == NULL)) {
530 		return -ENOMEM;
531 	}
532 
533 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
534 
535 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
536 	accel_task->s.iovs[0].iov_base = src;
537 	accel_task->s.iovs[0].iov_len = nbytes;
538 	accel_task->s.iovcnt = 1;
539 	accel_task->nbytes = nbytes;
540 	accel_task->crc_dst = crc_dst;
541 	accel_task->seed = seed;
542 	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
543 	accel_task->src_domain = NULL;
544 	accel_task->dst_domain = NULL;
545 
546 	return accel_submit_task(accel_ch, accel_task);
547 }
548 
549 /* Accel framework public API for chained CRC-32C function */
550 int
551 spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
552 			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
553 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
554 {
555 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
556 	struct spdk_accel_task *accel_task;
557 
558 	if (iov == NULL) {
559 		SPDK_ERRLOG("iov should not be NULL\n");
560 		return -EINVAL;
561 	}
562 
563 	if (!iov_cnt) {
564 		SPDK_ERRLOG("iovcnt should not be zero\n");
565 		return -EINVAL;
566 	}
567 
568 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
569 	if (spdk_unlikely(accel_task == NULL)) {
570 		SPDK_ERRLOG("no memory\n");
571 		assert(0);
572 		return -ENOMEM;
573 	}
574 
575 	accel_task->s.iovs = iov;
576 	accel_task->s.iovcnt = iov_cnt;
577 	accel_task->nbytes = accel_get_iovlen(iov, iov_cnt);
578 	accel_task->crc_dst = crc_dst;
579 	accel_task->seed = seed;
580 	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
581 	accel_task->src_domain = NULL;
582 	accel_task->dst_domain = NULL;
583 
584 	return accel_submit_task(accel_ch, accel_task);
585 }
586 
587 /* Accel framework public API for copy with CRC-32C function */
588 int
589 spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
590 			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
591 			      spdk_accel_completion_cb cb_fn, void *cb_arg)
592 {
593 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
594 	struct spdk_accel_task *accel_task;
595 
596 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
597 	if (spdk_unlikely(accel_task == NULL)) {
598 		return -ENOMEM;
599 	}
600 
601 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
602 
603 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
604 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
605 	accel_task->d.iovs[0].iov_base = dst;
606 	accel_task->d.iovs[0].iov_len = nbytes;
607 	accel_task->d.iovcnt = 1;
608 	accel_task->s.iovs[0].iov_base = src;
609 	accel_task->s.iovs[0].iov_len = nbytes;
610 	accel_task->s.iovcnt = 1;
611 	accel_task->nbytes = nbytes;
612 	accel_task->crc_dst = crc_dst;
613 	accel_task->seed = seed;
614 	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
615 	accel_task->src_domain = NULL;
616 	accel_task->dst_domain = NULL;
617 
618 	return accel_submit_task(accel_ch, accel_task);
619 }
620 
621 /* Accel framework public API for chained copy + CRC-32C function */
622 int
623 spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
624 			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
625 			       uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg)
626 {
627 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
628 	struct spdk_accel_task *accel_task;
629 	uint64_t nbytes;
630 
631 	if (src_iovs == NULL) {
632 		SPDK_ERRLOG("iov should not be NULL\n");
633 		return -EINVAL;
634 	}
635 
636 	if (!iov_cnt) {
637 		SPDK_ERRLOG("iovcnt should not be zero\n");
638 		return -EINVAL;
639 	}
640 
641 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
642 	if (spdk_unlikely(accel_task == NULL)) {
643 		SPDK_ERRLOG("no memory\n");
644 		assert(0);
645 		return -ENOMEM;
646 	}
647 
648 	nbytes = accel_get_iovlen(src_iovs, iov_cnt);
649 
650 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
651 
652 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
653 	accel_task->d.iovs[0].iov_base = dst;
654 	accel_task->d.iovs[0].iov_len = nbytes;
655 	accel_task->d.iovcnt = 1;
656 	accel_task->s.iovs = src_iovs;
657 	accel_task->s.iovcnt = iov_cnt;
658 	accel_task->nbytes = nbytes;
659 	accel_task->crc_dst = crc_dst;
660 	accel_task->seed = seed;
661 	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
662 	accel_task->src_domain = NULL;
663 	accel_task->dst_domain = NULL;
664 
665 	return accel_submit_task(accel_ch, accel_task);
666 }
667 
668 int
669 spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
670 			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size,
671 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
672 {
673 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
674 	struct spdk_accel_task *accel_task;
675 
676 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
677 	if (spdk_unlikely(accel_task == NULL)) {
678 		return -ENOMEM;
679 	}
680 
681 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
682 
683 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
684 	accel_task->d.iovs[0].iov_base = dst;
685 	accel_task->d.iovs[0].iov_len = nbytes;
686 	accel_task->d.iovcnt = 1;
687 	accel_task->output_size = output_size;
688 	accel_task->s.iovs = src_iovs;
689 	accel_task->s.iovcnt = src_iovcnt;
690 	accel_task->nbytes = nbytes;
691 	accel_task->op_code = SPDK_ACCEL_OPC_COMPRESS;
692 	accel_task->src_domain = NULL;
693 	accel_task->dst_domain = NULL;
694 
695 	return accel_submit_task(accel_ch, accel_task);
696 }
697 
698 int
699 spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
700 			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
701 			     uint32_t *output_size, spdk_accel_completion_cb cb_fn,
702 			     void *cb_arg)
703 {
704 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
705 	struct spdk_accel_task *accel_task;
706 
707 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
708 	if (spdk_unlikely(accel_task == NULL)) {
709 		return -ENOMEM;
710 	}
711 
712 	accel_task->output_size = output_size;
713 	accel_task->s.iovs = src_iovs;
714 	accel_task->s.iovcnt = src_iovcnt;
715 	accel_task->d.iovs = dst_iovs;
716 	accel_task->d.iovcnt = dst_iovcnt;
717 	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
718 	accel_task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
719 	accel_task->src_domain = NULL;
720 	accel_task->dst_domain = NULL;
721 
722 	return accel_submit_task(accel_ch, accel_task);
723 }
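
/* Usage sketch (hypothetical iovecs; on completion *output_size receives the
 * decompressed length when the module reports it):
 *
 *	uint32_t out_size;
 *
 *	rc = spdk_accel_submit_decompress(ch, &dst_iov, 1, &src_iov, 1,
 *					  &out_size, decompress_done, NULL);
 */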
724 
725 int
726 spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
727 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
728 			  struct iovec *src_iovs, uint32_t src_iovcnt,
729 			  uint64_t iv, uint32_t block_size,
730 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
731 {
732 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
733 	struct spdk_accel_task *accel_task;
734 
735 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
736 		return -EINVAL;
737 	}
738 
739 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
740 	if (spdk_unlikely(accel_task == NULL)) {
741 		return -ENOMEM;
742 	}
743 
744 	accel_task->crypto_key = key;
745 	accel_task->s.iovs = src_iovs;
746 	accel_task->s.iovcnt = src_iovcnt;
747 	accel_task->d.iovs = dst_iovs;
748 	accel_task->d.iovcnt = dst_iovcnt;
749 	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
750 	accel_task->iv = iv;
751 	accel_task->block_size = block_size;
752 	accel_task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
753 	accel_task->src_domain = NULL;
754 	accel_task->dst_domain = NULL;
755 
756 	return accel_submit_task(accel_ch, accel_task);
757 }
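
/* Usage sketch (assumes a key created beforehand, e.g. via
 * spdk_accel_crypto_key_create(), and 512-byte crypto blocks; with the default
 * SIMPLE_LBA tweak mode the iv is typically the starting LBA):
 *
 *	rc = spdk_accel_submit_encrypt(ch, key, &dst_iov, 1, &src_iov, 1,
 *				       lba, 512, crypt_done, NULL);
 */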
758 
759 int
760 spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
761 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
762 			  struct iovec *src_iovs, uint32_t src_iovcnt,
763 			  uint64_t iv, uint32_t block_size,
764 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
765 {
766 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
767 	struct spdk_accel_task *accel_task;
768 
769 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
770 		return -EINVAL;
771 	}
772 
773 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
774 	if (spdk_unlikely(accel_task == NULL)) {
775 		return -ENOMEM;
776 	}
777 
778 	accel_task->crypto_key = key;
779 	accel_task->s.iovs = src_iovs;
780 	accel_task->s.iovcnt = src_iovcnt;
781 	accel_task->d.iovs = dst_iovs;
782 	accel_task->d.iovcnt = dst_iovcnt;
783 	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
784 	accel_task->iv = iv;
785 	accel_task->block_size = block_size;
786 	accel_task->op_code = SPDK_ACCEL_OPC_DECRYPT;
787 	accel_task->src_domain = NULL;
788 	accel_task->dst_domain = NULL;
789 
790 	return accel_submit_task(accel_ch, accel_task);
791 }
792 
793 int
794 spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
795 		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
796 {
797 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
798 	struct spdk_accel_task *accel_task;
799 
800 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
801 	if (spdk_unlikely(accel_task == NULL)) {
802 		return -ENOMEM;
803 	}
804 
805 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
806 
807 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
808 	accel_task->nsrcs.srcs = sources;
809 	accel_task->nsrcs.cnt = nsrcs;
810 	accel_task->d.iovs[0].iov_base = dst;
811 	accel_task->d.iovs[0].iov_len = nbytes;
812 	accel_task->d.iovcnt = 1;
813 	accel_task->nbytes = nbytes;
814 	accel_task->op_code = SPDK_ACCEL_OPC_XOR;
815 	accel_task->src_domain = NULL;
816 	accel_task->dst_domain = NULL;
817 
818 	return accel_submit_task(accel_ch, accel_task);
819 }
820 
821 int
822 spdk_accel_submit_dif_verify(struct spdk_io_channel *ch,
823 			     struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
824 			     const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
825 			     spdk_accel_completion_cb cb_fn, void *cb_arg)
826 {
827 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
828 	struct spdk_accel_task *accel_task;
829 
830 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
831 	if (accel_task == NULL) {
832 		return -ENOMEM;
833 	}
834 
835 	accel_task->s.iovs = iovs;
836 	accel_task->s.iovcnt = iovcnt;
837 	accel_task->dif.ctx = ctx;
838 	accel_task->dif.err = err;
839 	accel_task->dif.num_blocks = num_blocks;
840 	accel_task->nbytes = num_blocks * ctx->block_size;
841 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY;
842 	accel_task->src_domain = NULL;
843 	accel_task->dst_domain = NULL;
844 
845 	return accel_submit_task(accel_ch, accel_task);
846 }
847 
848 int
849 spdk_accel_submit_dif_generate(struct spdk_io_channel *ch,
850 			       struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
851 			       const struct spdk_dif_ctx *ctx,
852 			       spdk_accel_completion_cb cb_fn, void *cb_arg)
853 {
854 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
855 	struct spdk_accel_task *accel_task;
856 
857 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
858 	if (accel_task == NULL) {
859 		return -ENOMEM;
860 	}
861 
862 	accel_task->s.iovs = iovs;
863 	accel_task->s.iovcnt = iovcnt;
864 	accel_task->dif.ctx = ctx;
865 	accel_task->dif.num_blocks = num_blocks;
866 	accel_task->nbytes = num_blocks * ctx->block_size;
867 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE;
868 	accel_task->src_domain = NULL;
869 	accel_task->dst_domain = NULL;
870 
871 	return accel_submit_task(accel_ch, accel_task);
872 }
873 
874 int
875 spdk_accel_submit_dif_generate_copy(struct spdk_io_channel *ch, struct iovec *dst_iovs,
876 				    size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
877 				    uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
878 				    spdk_accel_completion_cb cb_fn, void *cb_arg)
879 {
880 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
881 	struct spdk_accel_task *accel_task;
882 
883 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
884 	if (accel_task == NULL) {
885 		return -ENOMEM;
886 	}
887 
888 	accel_task->s.iovs = src_iovs;
889 	accel_task->s.iovcnt = src_iovcnt;
890 	accel_task->d.iovs = dst_iovs;
891 	accel_task->d.iovcnt = dst_iovcnt;
892 	accel_task->dif.ctx = ctx;
893 	accel_task->dif.num_blocks = num_blocks;
894 	accel_task->nbytes = num_blocks * ctx->block_size;
895 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE_COPY;
896 	accel_task->src_domain = NULL;
897 	accel_task->dst_domain = NULL;
898 
899 	return accel_submit_task(accel_ch, accel_task);
900 }
901 
902 int
903 spdk_accel_submit_dif_verify_copy(struct spdk_io_channel *ch,
904 				  struct iovec *dst_iovs, size_t dst_iovcnt,
905 				  struct iovec *src_iovs, size_t src_iovcnt, uint32_t num_blocks,
906 				  const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
907 				  spdk_accel_completion_cb cb_fn, void *cb_arg)
908 {
909 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
910 	struct spdk_accel_task *accel_task;
911 
912 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
913 	if (accel_task == NULL) {
914 		return -ENOMEM;
915 	}
916 
917 	accel_task->s.iovs = src_iovs;
918 	accel_task->s.iovcnt = src_iovcnt;
919 	accel_task->d.iovs = dst_iovs;
920 	accel_task->d.iovcnt = dst_iovcnt;
921 	accel_task->dif.ctx = ctx;
922 	accel_task->dif.err = err;
923 	accel_task->dif.num_blocks = num_blocks;
924 	accel_task->nbytes = num_blocks * ctx->block_size;
925 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY_COPY;
926 	accel_task->src_domain = NULL;
927 	accel_task->dst_domain = NULL;
928 
929 	return accel_submit_task(accel_ch, accel_task);
930 }
931 
932 int
933 spdk_accel_submit_dix_generate(struct spdk_io_channel *ch, struct iovec *iovs,
934 			       size_t iovcnt, struct iovec *md_iov, uint32_t num_blocks,
935 			       const struct spdk_dif_ctx *ctx, spdk_accel_completion_cb cb_fn,
936 			       void *cb_arg)
937 {
938 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
939 	struct spdk_accel_task *accel_task;
940 
941 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
942 	if (accel_task == NULL) {
943 		return -ENOMEM;
944 	}
945 
946 	accel_task->s.iovs = iovs;
947 	accel_task->s.iovcnt = iovcnt;
948 	accel_task->d.iovs = md_iov;
949 	accel_task->d.iovcnt = 1;
950 	accel_task->dif.ctx = ctx;
951 	accel_task->dif.num_blocks = num_blocks;
952 	accel_task->nbytes = num_blocks * ctx->block_size;
953 	accel_task->op_code = SPDK_ACCEL_OPC_DIX_GENERATE;
954 	accel_task->src_domain = NULL;
955 	accel_task->dst_domain = NULL;
956 
957 	return accel_submit_task(accel_ch, accel_task);
958 }
959 
960 int
961 spdk_accel_submit_dix_verify(struct spdk_io_channel *ch, struct iovec *iovs,
962 			     size_t iovcnt, struct iovec *md_iov, uint32_t num_blocks,
963 			     const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
964 			     spdk_accel_completion_cb cb_fn, void *cb_arg)
965 {
966 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
967 	struct spdk_accel_task *accel_task;
968 
969 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
970 	if (accel_task == NULL) {
971 		return -ENOMEM;
972 	}
973 
974 	accel_task->s.iovs = iovs;
975 	accel_task->s.iovcnt = iovcnt;
976 	accel_task->d.iovs = md_iov;
977 	accel_task->d.iovcnt = 1;
978 	accel_task->dif.ctx = ctx;
979 	accel_task->dif.err = err;
980 	accel_task->dif.num_blocks = num_blocks;
981 	accel_task->nbytes = num_blocks * ctx->block_size;
982 	accel_task->op_code = SPDK_ACCEL_OPC_DIX_VERIFY;
983 	accel_task->src_domain = NULL;
984 	accel_task->dst_domain = NULL;
985 
986 	return accel_submit_task(accel_ch, accel_task);
987 }
988 
989 static inline struct accel_buffer *
990 accel_get_buf(struct accel_io_channel *ch, uint64_t len)
991 {
992 	struct accel_buffer *buf;
993 
994 	buf = SLIST_FIRST(&ch->buf_pool);
995 	if (spdk_unlikely(buf == NULL)) {
996 		accel_update_stats(ch, retry.bufdesc, 1);
997 		return NULL;
998 	}
999 
1000 	SLIST_REMOVE_HEAD(&ch->buf_pool, link);
1001 	buf->len = len;
1002 	buf->buf = NULL;
1003 	buf->seq = NULL;
1004 	buf->cb_fn = NULL;
1005 
1006 	return buf;
1007 }
1008 
1009 static inline void
1010 accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
1011 {
1012 	if (buf->buf != NULL) {
1013 		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
1014 	}
1015 
1016 	SLIST_INSERT_HEAD(&ch->buf_pool, buf, link);
1017 }
1018 
1019 static inline struct spdk_accel_sequence *
1020 accel_sequence_get(struct accel_io_channel *ch)
1021 {
1022 	struct spdk_accel_sequence *seq;
1023 
1024 	assert(g_opts.task_count >= ch->stats.task_outstanding);
1025 
1026 	/* A sequence cannot be allocated unless enough task objects remain to satisfy the required
1027 	 * limit.  This prevents a potential deadlock where several requests wait for task resources
1028 	 * and none of them can make progress.  The scheme only works as long as at most one async
1029 	 * operation (currently the io buffer allocation) happens after the sequence object is
1030 	 * obtained; if more async operations are added, it needs to be improved. */
1031 	if (spdk_unlikely(g_opts.task_count - ch->stats.task_outstanding < ACCEL_TASKS_IN_SEQUENCE_LIMIT)) {
1032 		return NULL;
1033 	}
1034 
1035 	seq = SLIST_FIRST(&ch->seq_pool);
1036 	if (spdk_unlikely(seq == NULL)) {
1037 		accel_update_stats(ch, retry.sequence, 1);
1038 		return NULL;
1039 	}
1040 
1041 	accel_update_stats(ch, sequence_outstanding, 1);
1042 	SLIST_REMOVE_HEAD(&ch->seq_pool, link);
1043 
1044 	TAILQ_INIT(&seq->tasks);
1045 	SLIST_INIT(&seq->bounce_bufs);
1046 
1047 	seq->ch = ch;
1048 	seq->status = 0;
1049 	seq->state = ACCEL_SEQUENCE_STATE_INIT;
1050 	seq->in_process_sequence = false;
1051 
1052 	return seq;
1053 }
1054 
1055 static inline void
1056 accel_sequence_put(struct spdk_accel_sequence *seq)
1057 {
1058 	struct accel_io_channel *ch = seq->ch;
1059 	struct accel_buffer *buf;
1060 
1061 	while (!SLIST_EMPTY(&seq->bounce_bufs)) {
1062 		buf = SLIST_FIRST(&seq->bounce_bufs);
1063 		SLIST_REMOVE_HEAD(&seq->bounce_bufs, link);
1064 		accel_put_buf(seq->ch, buf);
1065 	}
1066 
1067 	assert(TAILQ_EMPTY(&seq->tasks));
1068 	seq->ch = NULL;
1069 
1070 	SLIST_INSERT_HEAD(&ch->seq_pool, seq, link);
1071 	accel_update_stats(ch, sequence_outstanding, -1);
1072 }
1073 
1074 static void accel_sequence_task_cb(void *cb_arg, int status);
1075 
1076 static inline struct spdk_accel_task *
1077 accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
1078 			spdk_accel_step_cb cb_fn, void *cb_arg)
1079 {
1080 	struct spdk_accel_task *task;
1081 
1082 	task = _get_task(ch, NULL, NULL);
1083 	if (spdk_unlikely(task == NULL)) {
1084 		return task;
1085 	}
1086 
1087 	task->step_cb_fn = cb_fn;
1088 	task->cb_arg = cb_arg;
1089 	task->seq = seq;
1090 
1091 	return task;
1092 }
1093 
1094 int
1095 spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1096 		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
1097 		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1098 		       struct iovec *src_iovs, uint32_t src_iovcnt,
1099 		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1100 		       spdk_accel_step_cb cb_fn, void *cb_arg)
1101 {
1102 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1103 	struct spdk_accel_task *task;
1104 	struct spdk_accel_sequence *seq = *pseq;
1105 
1106 	if (seq == NULL) {
1107 		seq = accel_sequence_get(accel_ch);
1108 		if (spdk_unlikely(seq == NULL)) {
1109 			return -ENOMEM;
1110 		}
1111 	}
1112 
1113 	assert(seq->ch == accel_ch);
1114 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1115 	if (spdk_unlikely(task == NULL)) {
1116 		if (*pseq == NULL) {
1117 			accel_sequence_put(seq);
1118 		}
1119 
1120 		return -ENOMEM;
1121 	}
1122 
1123 	task->dst_domain = dst_domain;
1124 	task->dst_domain_ctx = dst_domain_ctx;
1125 	task->d.iovs = dst_iovs;
1126 	task->d.iovcnt = dst_iovcnt;
1127 	task->src_domain = src_domain;
1128 	task->src_domain_ctx = src_domain_ctx;
1129 	task->s.iovs = src_iovs;
1130 	task->s.iovcnt = src_iovcnt;
1131 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1132 	task->op_code = SPDK_ACCEL_OPC_COPY;
1133 
1134 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1135 	*pseq = seq;
1136 
1137 	return 0;
1138 }
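
/* Usage sketch (host-memory iovecs, so both memory domains are NULL; the
 * sequence is later executed with spdk_accel_sequence_finish()):
 *
 *	struct spdk_accel_sequence *seq = NULL;
 *
 *	rc = spdk_accel_append_copy(&seq, ch, &dst_iov, 1, NULL, NULL,
 *				    &src_iov, 1, NULL, NULL, NULL, NULL);
 *	if (rc == 0) {
 *		spdk_accel_sequence_finish(seq, seq_done, NULL);
 *	}
 */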
1139 
1140 int
1141 spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1142 		       void *buf, uint64_t len,
1143 		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
1144 		       spdk_accel_step_cb cb_fn, void *cb_arg)
1145 {
1146 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1147 	struct spdk_accel_task *task;
1148 	struct spdk_accel_sequence *seq = *pseq;
1149 
1150 	if (seq == NULL) {
1151 		seq = accel_sequence_get(accel_ch);
1152 		if (spdk_unlikely(seq == NULL)) {
1153 			return -ENOMEM;
1154 		}
1155 	}
1156 
1157 	assert(seq->ch == accel_ch);
1158 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1159 	if (spdk_unlikely(task == NULL)) {
1160 		if (*pseq == NULL) {
1161 			accel_sequence_put(seq);
1162 		}
1163 
1164 		return -ENOMEM;
1165 	}
1166 
1167 	memset(&task->fill_pattern, pattern, sizeof(uint64_t));
1168 
1169 	task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
1170 	if (spdk_unlikely(!task->aux)) {
1171 		SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");
1172 		if (*pseq == NULL) {
1173 			accel_sequence_put(seq);
1174 		}
1175 
1176 		task->seq = NULL;
1177 		_put_task(task->accel_ch, task);
1178 		assert(0);
1179 		return -ENOMEM;
1180 	}
1181 	SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
1182 	task->has_aux = true;
1183 
1184 	task->d.iovs = &task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
1185 	task->d.iovs[0].iov_base = buf;
1186 	task->d.iovs[0].iov_len = len;
1187 	task->d.iovcnt = 1;
1188 	task->nbytes = len;
1189 	task->src_domain = NULL;
1190 	task->dst_domain = domain;
1191 	task->dst_domain_ctx = domain_ctx;
1192 	task->op_code = SPDK_ACCEL_OPC_FILL;
1193 
1194 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1195 	*pseq = seq;
1196 
1197 	return 0;
1198 }
1199 
1200 int
1201 spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1202 			     struct iovec *dst_iovs, size_t dst_iovcnt,
1203 			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1204 			     struct iovec *src_iovs, size_t src_iovcnt,
1205 			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1206 			     spdk_accel_step_cb cb_fn, void *cb_arg)
1207 {
1208 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1209 	struct spdk_accel_task *task;
1210 	struct spdk_accel_sequence *seq = *pseq;
1211 
1212 	if (seq == NULL) {
1213 		seq = accel_sequence_get(accel_ch);
1214 		if (spdk_unlikely(seq == NULL)) {
1215 			return -ENOMEM;
1216 		}
1217 	}
1218 
1219 	assert(seq->ch == accel_ch);
1220 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1221 	if (spdk_unlikely(task == NULL)) {
1222 		if (*pseq == NULL) {
1223 			accel_sequence_put(seq);
1224 		}
1225 
1226 		return -ENOMEM;
1227 	}
1228 
1229 	/* TODO: support output_size for chaining */
1230 	task->output_size = NULL;
1231 	task->dst_domain = dst_domain;
1232 	task->dst_domain_ctx = dst_domain_ctx;
1233 	task->d.iovs = dst_iovs;
1234 	task->d.iovcnt = dst_iovcnt;
1235 	task->src_domain = src_domain;
1236 	task->src_domain_ctx = src_domain_ctx;
1237 	task->s.iovs = src_iovs;
1238 	task->s.iovcnt = src_iovcnt;
1239 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1240 	task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
1241 
1242 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1243 	*pseq = seq;
1244 
1245 	return 0;
1246 }
1247 
1248 int
1249 spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1250 			  struct spdk_accel_crypto_key *key,
1251 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
1252 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1253 			  struct iovec *src_iovs, uint32_t src_iovcnt,
1254 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1255 			  uint64_t iv, uint32_t block_size,
1256 			  spdk_accel_step_cb cb_fn, void *cb_arg)
1257 {
1258 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1259 	struct spdk_accel_task *task;
1260 	struct spdk_accel_sequence *seq = *pseq;
1261 
1262 	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);
1263 
1264 	if (seq == NULL) {
1265 		seq = accel_sequence_get(accel_ch);
1266 		if (spdk_unlikely(seq == NULL)) {
1267 			return -ENOMEM;
1268 		}
1269 	}
1270 
1271 	assert(seq->ch == accel_ch);
1272 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1273 	if (spdk_unlikely(task == NULL)) {
1274 		if (*pseq == NULL) {
1275 			accel_sequence_put(seq);
1276 		}
1277 
1278 		return -ENOMEM;
1279 	}
1280 
1281 	task->crypto_key = key;
1282 	task->src_domain = src_domain;
1283 	task->src_domain_ctx = src_domain_ctx;
1284 	task->s.iovs = src_iovs;
1285 	task->s.iovcnt = src_iovcnt;
1286 	task->dst_domain = dst_domain;
1287 	task->dst_domain_ctx = dst_domain_ctx;
1288 	task->d.iovs = dst_iovs;
1289 	task->d.iovcnt = dst_iovcnt;
1290 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1291 	task->iv = iv;
1292 	task->block_size = block_size;
1293 	task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
1294 
1295 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1296 	*pseq = seq;
1297 
1298 	return 0;
1299 }
1300 
1301 int
1302 spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1303 			  struct spdk_accel_crypto_key *key,
1304 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
1305 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1306 			  struct iovec *src_iovs, uint32_t src_iovcnt,
1307 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1308 			  uint64_t iv, uint32_t block_size,
1309 			  spdk_accel_step_cb cb_fn, void *cb_arg)
1310 {
1311 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1312 	struct spdk_accel_task *task;
1313 	struct spdk_accel_sequence *seq = *pseq;
1314 
1315 	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);
1316 
1317 	if (seq == NULL) {
1318 		seq = accel_sequence_get(accel_ch);
1319 		if (spdk_unlikely(seq == NULL)) {
1320 			return -ENOMEM;
1321 		}
1322 	}
1323 
1324 	assert(seq->ch == accel_ch);
1325 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1326 	if (spdk_unlikely(task == NULL)) {
1327 		if (*pseq == NULL) {
1328 			accel_sequence_put(seq);
1329 		}
1330 
1331 		return -ENOMEM;
1332 	}
1333 
1334 	task->crypto_key = key;
1335 	task->src_domain = src_domain;
1336 	task->src_domain_ctx = src_domain_ctx;
1337 	task->s.iovs = src_iovs;
1338 	task->s.iovcnt = src_iovcnt;
1339 	task->dst_domain = dst_domain;
1340 	task->dst_domain_ctx = dst_domain_ctx;
1341 	task->d.iovs = dst_iovs;
1342 	task->d.iovcnt = dst_iovcnt;
1343 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1344 	task->iv = iv;
1345 	task->block_size = block_size;
1346 	task->op_code = SPDK_ACCEL_OPC_DECRYPT;
1347 
1348 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1349 	*pseq = seq;
1350 
1351 	return 0;
1352 }
1353 
1354 int
1355 spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1356 			 uint32_t *dst, struct iovec *iovs, uint32_t iovcnt,
1357 			 struct spdk_memory_domain *domain, void *domain_ctx,
1358 			 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg)
1359 {
1360 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1361 	struct spdk_accel_task *task;
1362 	struct spdk_accel_sequence *seq = *pseq;
1363 
1364 	if (seq == NULL) {
1365 		seq = accel_sequence_get(accel_ch);
1366 		if (spdk_unlikely(seq == NULL)) {
1367 			return -ENOMEM;
1368 		}
1369 	}
1370 
1371 	assert(seq->ch == accel_ch);
1372 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1373 	if (spdk_unlikely(task == NULL)) {
1374 		if (*pseq == NULL) {
1375 			accel_sequence_put(seq);
1376 		}
1377 
1378 		return -ENOMEM;
1379 	}
1380 
1381 	task->s.iovs = iovs;
1382 	task->s.iovcnt = iovcnt;
1383 	task->src_domain = domain;
1384 	task->src_domain_ctx = domain_ctx;
1385 	task->nbytes = accel_get_iovlen(iovs, iovcnt);
1386 	task->crc_dst = dst;
1387 	task->seed = seed;
1388 	task->op_code = SPDK_ACCEL_OPC_CRC32C;
1389 	task->dst_domain = NULL;
1390 
1391 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1392 	*pseq = seq;
1393 
1394 	return 0;
1395 }
1396 
1397 int
1398 spdk_accel_append_dif_verify(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1399 			     struct iovec *iovs, size_t iovcnt,
1400 			     struct spdk_memory_domain *domain, void *domain_ctx,
1401 			     uint32_t num_blocks,
1402 			     const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
1403 			     spdk_accel_step_cb cb_fn, void *cb_arg)
1404 {
1405 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1406 	struct spdk_accel_task *task;
1407 	struct spdk_accel_sequence *seq = *pseq;
1408 
1409 	if (seq == NULL) {
1410 		seq = accel_sequence_get(accel_ch);
1411 		if (spdk_unlikely(seq == NULL)) {
1412 			return -ENOMEM;
1413 		}
1414 	}
1415 
1416 	assert(seq->ch == accel_ch);
1417 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1418 	if (spdk_unlikely(task == NULL)) {
1419 		if (*pseq == NULL) {
1420 			accel_sequence_put(seq);
1421 		}
1422 
1423 		return -ENOMEM;
1424 	}
1425 
1426 	task->s.iovs = iovs;
1427 	task->s.iovcnt = iovcnt;
1428 	task->src_domain = domain;
1429 	task->src_domain_ctx = domain_ctx;
1430 	task->dst_domain = NULL;
1431 	task->dif.ctx = ctx;
1432 	task->dif.err = err;
1433 	task->dif.num_blocks = num_blocks;
1434 	task->nbytes = num_blocks * ctx->block_size;
1435 	task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY;
1436 
1437 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1438 	*pseq = seq;
1439 
1440 	return 0;
1441 }
1442 
1443 int
1444 spdk_accel_append_dif_verify_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1445 				  struct iovec *dst_iovs, size_t dst_iovcnt,
1446 				  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1447 				  struct iovec *src_iovs, size_t src_iovcnt,
1448 				  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1449 				  uint32_t num_blocks,
1450 				  const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
1451 				  spdk_accel_step_cb cb_fn, void *cb_arg)
1452 {
1453 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1454 	struct spdk_accel_task *task;
1455 	struct spdk_accel_sequence *seq = *pseq;
1456 
1457 	if (seq == NULL) {
1458 		seq = accel_sequence_get(accel_ch);
1459 		if (spdk_unlikely(seq == NULL)) {
1460 			return -ENOMEM;
1461 		}
1462 	}
1463 
1464 	assert(seq->ch == accel_ch);
1465 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1466 	if (spdk_unlikely(task == NULL)) {
1467 		if (*pseq == NULL) {
1468 			accel_sequence_put(seq);
1469 		}
1470 
1471 		return -ENOMEM;
1472 	}
1473 
1474 	task->dst_domain = dst_domain;
1475 	task->dst_domain_ctx = dst_domain_ctx;
1476 	task->d.iovs = dst_iovs;
1477 	task->d.iovcnt = dst_iovcnt;
1478 	task->src_domain = src_domain;
1479 	task->src_domain_ctx = src_domain_ctx;
1480 	task->s.iovs = src_iovs;
1481 	task->s.iovcnt = src_iovcnt;
1482 	task->dif.ctx = ctx;
1483 	task->dif.err = err;
1484 	task->dif.num_blocks = num_blocks;
1485 	task->nbytes = num_blocks * ctx->block_size;
1486 	task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY_COPY;
1487 
1488 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1489 	*pseq = seq;
1490 
1491 	return 0;
1492 }
1493 
1494 int
1495 spdk_accel_append_dif_generate(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1496 			       struct iovec *iovs, size_t iovcnt,
1497 			       struct spdk_memory_domain *domain, void *domain_ctx,
1498 			       uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1499 			       spdk_accel_step_cb cb_fn, void *cb_arg)
1500 {
1501 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1502 	struct spdk_accel_task *task;
1503 	struct spdk_accel_sequence *seq = *pseq;
1504 
1505 	if (seq == NULL) {
1506 		seq = accel_sequence_get(accel_ch);
1507 		if (spdk_unlikely(seq == NULL)) {
1508 			return -ENOMEM;
1509 		}
1510 	}
1511 
1512 	assert(seq->ch == accel_ch);
1513 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1514 	if (spdk_unlikely(task == NULL)) {
1515 		if (*pseq == NULL) {
1516 			accel_sequence_put(seq);
1517 		}
1518 
1519 		return -ENOMEM;
1520 	}
1521 
1522 	task->s.iovs = iovs;
1523 	task->s.iovcnt = iovcnt;
1524 	task->src_domain = domain;
1525 	task->src_domain_ctx = domain_ctx;
1526 	task->dst_domain = NULL;
1527 	task->dif.ctx = ctx;
1528 	task->dif.num_blocks = num_blocks;
1529 	task->nbytes = num_blocks * ctx->block_size;
1530 	task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE;
1531 
1532 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1533 	*pseq = seq;
1534 
1535 	return 0;
1536 }
1537 
1538 int
1539 spdk_accel_append_dif_generate_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1540 				    struct iovec *dst_iovs, size_t dst_iovcnt,
1541 				    struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1542 				    struct iovec *src_iovs, size_t src_iovcnt,
1543 				    struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1544 				    uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1545 				    spdk_accel_step_cb cb_fn, void *cb_arg)
1546 {
1547 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1548 	struct spdk_accel_task *task;
1549 	struct spdk_accel_sequence *seq = *pseq;
1550 
1551 	if (seq == NULL) {
1552 		seq = accel_sequence_get(accel_ch);
1553 		if (spdk_unlikely(seq == NULL)) {
1554 			return -ENOMEM;
1555 		}
1556 	}
1557 
1558 	assert(seq->ch == accel_ch);
1559 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1560 	if (spdk_unlikely(task == NULL)) {
1561 		if (*pseq == NULL) {
1562 			accel_sequence_put(seq);
1563 		}
1564 
1565 		return -ENOMEM;
1566 	}
1567 
1568 	task->dst_domain = dst_domain;
1569 	task->dst_domain_ctx = dst_domain_ctx;
1570 	task->d.iovs = dst_iovs;
1571 	task->d.iovcnt = dst_iovcnt;
1572 	task->src_domain = src_domain;
1573 	task->src_domain_ctx = src_domain_ctx;
1574 	task->s.iovs = src_iovs;
1575 	task->s.iovcnt = src_iovcnt;
1576 	task->nbytes = num_blocks * ctx->block_size;
1577 	task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE_COPY;
1578 
1579 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1580 	*pseq = seq;
1581 
1582 	return 0;
1583 }
1584 
1585 int
1586 spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
1587 		   struct spdk_memory_domain **domain, void **domain_ctx)
1588 {
1589 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1590 	struct accel_buffer *accel_buf;
1591 
1592 	accel_buf = accel_get_buf(accel_ch, len);
1593 	if (spdk_unlikely(accel_buf == NULL)) {
1594 		return -ENOMEM;
1595 	}
1596 
1597 	accel_buf->ch = accel_ch;
1598 
1599 	/* We always return the same pointer and identify the buffers through domain_ctx */
1600 	*buf = ACCEL_BUFFER_BASE;
1601 	*domain_ctx = accel_buf;
1602 	*domain = g_accel_domain;
1603 
1604 	return 0;
1605 }
1606 
1607 void
1608 spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
1609 		   struct spdk_memory_domain *domain, void *domain_ctx)
1610 {
1611 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1612 	struct accel_buffer *accel_buf = domain_ctx;
1613 
1614 	assert(domain == g_accel_domain);
1615 	assert(buf == ACCEL_BUFFER_BASE);
1616 
1617 	accel_put_buf(accel_ch, accel_buf);
1618 }
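
/* Usage sketch: reserve an accel buffer to serve as an intermediate buffer in
 * a sequence.  The pointer is opaque and only meaningful together with the
 * accel memory domain and domain_ctx; backing memory is allocated lazily when
 * the sequence executes:
 *
 *	void *buf;
 *	struct spdk_memory_domain *domain;
 *	void *domain_ctx;
 *
 *	rc = spdk_accel_get_buf(ch, 4096, &buf, &domain, &domain_ctx);
 *	... pass (buf, domain, domain_ctx) to spdk_accel_append_*() calls ...
 *	spdk_accel_put_buf(ch, buf, domain, domain_ctx);
 */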
1619 
1620 static void
1621 accel_sequence_complete_task(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1622 {
1623 	struct accel_io_channel *ch = seq->ch;
1624 	spdk_accel_step_cb cb_fn;
1625 	void *cb_arg;
1626 
1627 	TAILQ_REMOVE(&seq->tasks, task, seq_link);
1628 	cb_fn = task->step_cb_fn;
1629 	cb_arg = task->cb_arg;
1630 	task->seq = NULL;
1631 	if (task->has_aux) {
1632 		SLIST_INSERT_HEAD(&ch->task_aux_data_pool, task->aux, link);
1633 		task->aux = NULL;
1634 		task->has_aux = false;
1635 	}
1636 
1637 	_put_task(ch, task);
1638 
1639 	if (cb_fn != NULL) {
1640 		cb_fn(cb_arg);
1641 	}
1642 }
1643 
1644 static void
1645 accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
1646 {
1647 	struct spdk_accel_task *task;
1648 
1649 	while (!TAILQ_EMPTY(&seq->tasks)) {
1650 		task = TAILQ_FIRST(&seq->tasks);
1651 		accel_sequence_complete_task(seq, task);
1652 	}
1653 }
1654 
1655 static void
1656 accel_sequence_complete(struct spdk_accel_sequence *seq)
1657 {
1658 	spdk_accel_completion_cb cb_fn = seq->cb_fn;
1659 	void *cb_arg = seq->cb_arg;
1660 	int status = seq->status;
1661 
1662 	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, status);
1663 
1664 	accel_update_stats(seq->ch, sequence_executed, 1);
1665 	if (spdk_unlikely(status != 0)) {
1666 		accel_update_stats(seq->ch, sequence_failed, 1);
1667 	}
1668 
1669 	/* First, notify all users who appended operations to this sequence */
1670 	accel_sequence_complete_tasks(seq);
1671 	accel_sequence_put(seq);
1672 
1673 	/* Then, notify the user who finished the sequence */
1674 	cb_fn(cb_arg, status);
1675 }
1676 
1677 static void
1678 accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
1679 {
1680 	uintptr_t offset;
1681 
1682 	offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
1683 	assert(offset < accel_buf->len);
1684 
1685 	diov->iov_base = (char *)accel_buf->buf + offset;
1686 	diov->iov_len = siov->iov_len;
1687 }
1688 
1689 static void
1690 accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
1691 {
1692 	struct spdk_accel_task *task;
1693 	struct iovec *iov;
1694 
1695 	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
1696 	 * in a sequence that were using it.
1697 	 */
1698 	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
1699 		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
1700 			if (!task->has_aux) {
1701 				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
1702 				assert(task->aux && "Can't allocate aux data structure");
1703 				task->has_aux = true;
1704 				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
1705 			}
1706 
1707 			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
1708 			assert(task->s.iovcnt == 1);
1709 			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
1710 			task->src_domain = NULL;
1711 			task->s.iovs = iov;
1712 		}
1713 		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
1714 			if (!task->has_aux) {
1715 				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
1716 				assert(task->aux && "Can't allocate aux data structure");
1717 				task->has_aux = true;
1718 				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
1719 			}
1720 
1721 			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
1722 			assert(task->d.iovcnt == 1);
1723 			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
1724 			task->dst_domain = NULL;
1725 			task->d.iovs = iov;
1726 		}
1727 	}
1728 }
1729 
1730 static void accel_process_sequence(struct spdk_accel_sequence *seq);
1731 
1732 static void
1733 accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
1734 {
1735 	struct accel_buffer *accel_buf;
1736 
1737 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1738 
1739 	assert(accel_buf->seq != NULL);
1740 	assert(accel_buf->buf == NULL);
1741 	accel_buf->buf = buf;
1742 
1743 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1744 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
1745 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1746 	accel_process_sequence(accel_buf->seq);
1747 }
1748 
1749 static bool
1750 accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
1751 			 spdk_iobuf_get_cb cb_fn)
1752 {
1753 	struct accel_io_channel *ch = seq->ch;
1754 
1755 	assert(buf->seq == NULL);
1756 
1757 	buf->seq = seq;
1758 
1759 	/* The buffer might already have been allocated by memory domain translation. */
1760 	if (buf->buf) {
1761 		return true;
1762 	}
1763 
1764 	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
1765 	if (spdk_unlikely(buf->buf == NULL)) {
1766 		accel_update_stats(ch, retry.iobuf, 1);
1767 		return false;
1768 	}
1769 
1770 	return true;
1771 }
1772 
1773 static bool
1774 accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1775 {
1776 	/* If a task doesn't have a dst/src buffer (e.g. fill, crc32), its dst/src domain is
1777 	 * expected to be NULL */
1778 	if (task->src_domain == g_accel_domain) {
1779 		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
1780 					      accel_iobuf_get_virtbuf_cb)) {
1781 			return false;
1782 		}
1783 
1784 		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
1785 	}
1786 
1787 	if (task->dst_domain == g_accel_domain) {
1788 		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
1789 					      accel_iobuf_get_virtbuf_cb)) {
1790 			return false;
1791 		}
1792 
1793 		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
1794 	}
1795 
1796 	return true;
1797 }
1798 
1799 static void
1800 accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
1801 {
1802 	struct accel_buffer *accel_buf;
1803 
1804 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1805 
1806 	assert(accel_buf->seq != NULL);
1807 	assert(accel_buf->buf == NULL);
1808 	accel_buf->buf = buf;
1809 
1810 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1811 	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
1812 }
1813 
1814 bool
1815 spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
1816 			      struct spdk_memory_domain *domain, void *domain_ctx,
1817 			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
1818 {
1819 	struct accel_buffer *accel_buf = domain_ctx;
1820 
1821 	assert(domain == g_accel_domain);
1822 	accel_buf->cb_fn = cb_fn;
1823 	accel_buf->cb_ctx = cb_ctx;
1824 
1825 	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
1826 		return false;
1827 	}
1828 
1829 	accel_sequence_set_virtbuf(seq, accel_buf);
1830 
1831 	return true;
1832 }
1833 
1834 struct spdk_accel_task *
1835 spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
1836 {
1837 	return TAILQ_FIRST(&seq->tasks);
1838 }
1839 
1840 struct spdk_accel_task *
1841 spdk_accel_sequence_next_task(struct spdk_accel_task *task)
1842 {
1843 	return TAILQ_NEXT(task, seq_link);
1844 }
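
/*
 * Illustrative sketch (not part of the library): how a platform driver can
 * walk a sequence's outstanding tasks using the two accessors above.
 * Compiled out, as it's an example only.
 */
#if 0
static void
example_walk_sequence(struct spdk_accel_sequence *seq)
{
	struct spdk_accel_task *task;

	for (task = spdk_accel_sequence_first_task(seq); task != NULL;
	     task = spdk_accel_sequence_next_task(task)) {
		SPDK_DEBUGLOG(accel, "task %p, opcode: %s\n", task, g_opcode_strings[task->op_code]);
	}
}
#endif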
1845 
1846 static inline void
1847 accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
1848 			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
1849 			struct accel_buffer *buf)
1850 {
1851 	bounce->orig_iovs = *iovs;
1852 	bounce->orig_iovcnt = *iovcnt;
1853 	bounce->orig_domain = *domain;
1854 	bounce->orig_domain_ctx = *domain_ctx;
1855 	bounce->iov.iov_base = buf->buf;
1856 	bounce->iov.iov_len = buf->len;
1857 
1858 	*iovs = &bounce->iov;
1859 	*iovcnt = 1;
1860 	*domain = NULL;
1861 }
1862 
1863 static void
1864 accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1865 {
1866 	struct spdk_accel_task *task;
1867 	struct accel_buffer *accel_buf;
1868 
1869 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1870 	assert(accel_buf->buf == NULL);
1871 	accel_buf->buf = buf;
1872 
1873 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
1874 	assert(task != NULL);
1875 
1876 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1877 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1878 	assert(task->aux);
1879 	assert(task->has_aux);
1880 	accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
1881 				&task->src_domain_ctx, accel_buf);
1882 	accel_process_sequence(accel_buf->seq);
1883 }
1884 
1885 static void
1886 accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1887 {
1888 	struct spdk_accel_task *task;
1889 	struct accel_buffer *accel_buf;
1890 
1891 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1892 	assert(accel_buf->buf == NULL);
1893 	accel_buf->buf = buf;
1894 
1895 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
1896 	assert(task != NULL);
1897 
1898 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1899 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1900 	assert(task->aux);
1901 	assert(task->has_aux);
1902 	accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
1903 				&task->dst_domain_ctx, accel_buf);
1904 	accel_process_sequence(accel_buf->seq);
1905 }
1906 
1907 static int
1908 accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1909 {
1910 	struct accel_buffer *buf;
1911 
1912 	if (task->src_domain != NULL) {
1913 		/* By the time we're here, accel buffers should have been allocated */
1914 		assert(task->src_domain != g_accel_domain);
1915 
1916 		if (!task->has_aux) {
1917 			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
1918 			if (spdk_unlikely(!task->aux)) {
1919 				SPDK_ERRLOG("Can't allocate aux data structure\n");
1920 				assert(0);
1921 				return -EAGAIN;
1922 			}
1923 			task->has_aux = true;
1924 			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
1925 		}
1926 		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
1927 		if (buf == NULL) {
1928 			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
1929 			return -ENOMEM;
1930 		}
1931 
1932 		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
1933 		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
1934 			return -EAGAIN;
1935 		}
1936 
1937 		accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt,
1938 					&task->src_domain, &task->src_domain_ctx, buf);
1939 	}
1940 
1941 	if (task->dst_domain != NULL) {
1942 		/* By the time we're here, accel buffers should have been allocated */
1943 		assert(task->dst_domain != g_accel_domain);
1944 
1945 		if (!task->has_aux) {
1946 			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
1947 			if (spdk_unlikely(!task->aux)) {
1948 				SPDK_ERRLOG("Can't allocate aux data structure\n");
1949 				assert(0);
1950 				return -EAGAIN;
1951 			}
1952 			task->has_aux = true;
1953 			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
1954 		}
1955 		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
1956 		if (buf == NULL) {
1957 			/* The src buffer will be released when the sequence is completed */
1958 			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
1959 			return -ENOMEM;
1960 		}
1961 
1962 		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
1963 		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
1964 			return -EAGAIN;
1965 		}
1966 
1967 		accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt,
1968 					&task->dst_domain, &task->dst_domain_ctx, buf);
1969 	}
1970 
1971 	return 0;
1972 }
1973 
1974 static void
1975 accel_task_pull_data_cb(void *ctx, int status)
1976 {
1977 	struct spdk_accel_sequence *seq = ctx;
1978 
1979 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1980 	if (spdk_likely(status == 0)) {
1981 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1982 	} else {
1983 		accel_sequence_set_fail(seq, status);
1984 	}
1985 
1986 	accel_process_sequence(seq);
1987 }
1988 
1989 static void
1990 accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1991 {
1992 	int rc;
1993 
1994 	assert(task->has_aux);
1995 	assert(task->aux);
1996 	assert(task->aux->bounce.s.orig_iovs != NULL);
1997 	assert(task->aux->bounce.s.orig_domain != NULL);
1998 	assert(task->aux->bounce.s.orig_domain != g_accel_domain);
1999 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
2000 
2001 	rc = spdk_memory_domain_pull_data(task->aux->bounce.s.orig_domain,
2002 					  task->aux->bounce.s.orig_domain_ctx,
2003 					  task->aux->bounce.s.orig_iovs, task->aux->bounce.s.orig_iovcnt,
2004 					  task->s.iovs, task->s.iovcnt,
2005 					  accel_task_pull_data_cb, seq);
2006 	if (spdk_unlikely(rc != 0)) {
2007 		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
2008 			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc);
2009 		accel_sequence_set_fail(seq, rc);
2010 	}
2011 }
2012 
2013 static void
2014 accel_task_push_data_cb(void *ctx, int status)
2015 {
2016 	struct spdk_accel_sequence *seq = ctx;
2017 
2018 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
2019 	if (spdk_likely(status == 0)) {
2020 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
2021 	} else {
2022 		accel_sequence_set_fail(seq, status);
2023 	}
2024 
2025 	accel_process_sequence(seq);
2026 }
2027 
2028 static void
2029 accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
2030 {
2031 	int rc;
2032 
2033 	assert(task->has_aux);
2034 	assert(task->aux);
2035 	assert(task->aux->bounce.d.orig_iovs != NULL);
2036 	assert(task->aux->bounce.d.orig_domain != NULL);
2037 	assert(task->aux->bounce.d.orig_domain != g_accel_domain);
2038 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
2039 
2040 	rc = spdk_memory_domain_push_data(task->aux->bounce.d.orig_domain,
2041 					  task->aux->bounce.d.orig_domain_ctx,
2042 					  task->aux->bounce.d.orig_iovs, task->aux->bounce.d.orig_iovcnt,
2043 					  task->d.iovs, task->d.iovcnt,
2044 					  accel_task_push_data_cb, seq);
2045 	if (spdk_unlikely(rc != 0)) {
2046 		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
2047 			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.d.orig_domain), rc);
2048 		accel_sequence_set_fail(seq, rc);
2049 	}
2050 }
2051 
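/*
 * Summary of the sequence state machine below (happy path, no platform
 * driver): INIT -> CHECK_VIRTBUF -> CHECK_BOUNCEBUF -> [PULL_DATA ->]
 * EXEC_TASK -> AWAIT_TASK -> COMPLETE_TASK -> [PUSH_DATA ->] NEXT_TASK and
 * back to INIT for the next task, until no tasks remain.  When a platform
 * driver is registered, INIT first goes through DRIVER_EXEC_TASKS /
 * DRIVER_AWAIT_TASKS and resumes at CHECK_VIRTBUF for any tasks the driver
 * didn't execute.  The AWAIT_* states simply wait for an asynchronous
 * completion to advance the state.
 */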
2052 static void
2053 accel_process_sequence(struct spdk_accel_sequence *seq)
2054 {
2055 	struct accel_io_channel *accel_ch = seq->ch;
2056 	struct spdk_accel_task *task;
2057 	enum accel_sequence_state state;
2058 	int rc;
2059 
2060 	/* Prevent recursive calls to this function */
2061 	if (spdk_unlikely(seq->in_process_sequence)) {
2062 		return;
2063 	}
2064 	seq->in_process_sequence = true;
2065 
2066 	task = TAILQ_FIRST(&seq->tasks);
2067 	do {
2068 		state = seq->state;
2069 		switch (state) {
2070 		case ACCEL_SEQUENCE_STATE_INIT:
2071 			if (g_accel_driver != NULL) {
2072 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS);
2073 				break;
2074 			}
2075 		/* Fall through */
2076 		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
2077 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
2078 			if (!accel_sequence_check_virtbuf(seq, task)) {
2079 				/* We couldn't allocate a buffer, wait until one is available */
2080 				break;
2081 			}
2082 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
2083 		/* Fall through */
2084 		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
2085 			/* If a module supports memory domains, we don't need to allocate bounce
2086 			 * buffers */
2087 			if (g_modules_opc[task->op_code].supports_memory_domains) {
2088 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
2089 				break;
2090 			}
2091 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
2092 			rc = accel_sequence_check_bouncebuf(seq, task);
2093 			if (spdk_unlikely(rc != 0)) {
2094 				/* We couldn't allocate a buffer, wait until one is available */
2095 				if (rc == -EAGAIN) {
2096 					break;
2097 				}
2098 				accel_sequence_set_fail(seq, rc);
2099 				break;
2100 			}
2101 			if (task->has_aux && task->s.iovs == &task->aux->bounce.s.iov) {
2102 				assert(task->aux->bounce.s.orig_iovs);
2103 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
2104 				break;
2105 			}
2106 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
2107 		/* Fall through */
2108 		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
2109 			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
2110 				      g_opcode_strings[task->op_code], seq);
2111 
2112 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
2113 			rc = accel_submit_task(accel_ch, task);
2114 			if (spdk_unlikely(rc != 0)) {
2115 				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
2116 					    g_opcode_strings[task->op_code], seq);
2117 				accel_sequence_set_fail(seq, rc);
2118 			}
2119 			break;
2120 		case ACCEL_SEQUENCE_STATE_PULL_DATA:
2121 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
2122 			accel_task_pull_data(seq, task);
2123 			break;
2124 		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
2125 			if (task->has_aux && task->d.iovs == &task->aux->bounce.d.iov) {
2126 				assert(task->aux->bounce.d.orig_iovs);
2127 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
2128 				break;
2129 			}
2130 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
2131 			break;
2132 		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
2133 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
2134 			accel_task_push_data(seq, task);
2135 			break;
2136 		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
2137 			accel_sequence_complete_task(seq, task);
2138 			/* Check if there are any remaining tasks */
2139 			task = TAILQ_FIRST(&seq->tasks);
2140 			if (task == NULL) {
2141 				/* Immediately return here to make sure we don't touch the sequence
2142 				 * after it's completed */
2143 				accel_sequence_complete(seq);
2144 				return;
2145 			}
2146 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
2147 			break;
2148 		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS:
2149 			assert(!TAILQ_EMPTY(&seq->tasks));
2150 
2151 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
2152 			rc = g_accel_driver->execute_sequence(accel_ch->driver_channel, seq);
2153 			if (spdk_unlikely(rc != 0)) {
2154 				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
2155 					    seq, g_accel_driver->name);
2156 				accel_sequence_set_fail(seq, rc);
2157 			}
2158 			break;
2159 		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS:
2160 			/* Get the task again, as the driver might have completed some tasks
2161 			 * synchronously */
2162 			task = TAILQ_FIRST(&seq->tasks);
2163 			if (task == NULL) {
2164 				/* Immediately return here to make sure we don't touch the sequence
2165 				 * after it's completed */
2166 				accel_sequence_complete(seq);
2167 				return;
2168 			}
2169 			/* We don't want to execute the next task through the driver, so we
2170 			 * explicitly omit the INIT state here */
2171 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
2172 			break;
2173 		case ACCEL_SEQUENCE_STATE_ERROR:
2174 			/* Immediately return here to make sure we don't touch the sequence
2175 			 * after it's completed */
2176 			assert(seq->status != 0);
2177 			accel_sequence_complete(seq);
2178 			return;
2179 		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
2180 		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
2181 		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
2182 		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
2183 		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
2184 		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
2185 			break;
2186 		default:
2187 			assert(0 && "bad state");
2188 			break;
2189 		}
2190 	} while (seq->state != state);
2191 
2192 	seq->in_process_sequence = false;
2193 }
2194 
2195 static void
2196 accel_sequence_task_cb(void *cb_arg, int status)
2197 {
2198 	struct spdk_accel_sequence *seq = cb_arg;
2199 	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);
2200 
2201 	switch (seq->state) {
2202 	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
2203 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
2204 		if (spdk_unlikely(status != 0)) {
2205 			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
2206 				    g_opcode_strings[task->op_code], seq);
2207 			accel_sequence_set_fail(seq, status);
2208 		}
2209 
2210 		accel_process_sequence(seq);
2211 		break;
2212 	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
2213 		assert(g_accel_driver != NULL);
2214 		/* Immediately remove the task from the outstanding list to make sure the next call
2215 		 * to spdk_accel_sequence_first_task() doesn't return it */
2216 		accel_sequence_complete_task(seq, task);
2217 		if (spdk_unlikely(status != 0)) {
2218 			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
2219 				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
2220 				    g_accel_driver->name);
2221 			/* Update status without using accel_sequence_set_fail() to avoid changing
2222 			 * seq's state to ERROR until the driver calls spdk_accel_sequence_continue() */
2223 			seq->status = status;
2224 		}
2225 		break;
2226 	default:
2227 		assert(0 && "bad state");
2228 		break;
2229 	}
2230 }
2231 
2232 void
2233 spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
2234 {
2235 	assert(g_accel_driver != NULL);
2236 	assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
2237 
2238 	if (spdk_likely(seq->status == 0)) {
2239 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS);
2240 	} else {
2241 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
2242 	}
2243 
2244 	accel_process_sequence(seq);
2245 }
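
/*
 * Illustrative sketch (not part of the library): rough shape of a driver's
 * execute_sequence() callback.  The names example_hw_supports() and
 * example_submit_to_hw() are hypothetical.  Once the driver has executed
 * (and completed) the tasks it can handle, it must call
 * spdk_accel_sequence_continue() to hand the rest back to the framework.
 * Compiled out, as it's an example only.
 */
#if 0
static int
example_driver_execute_sequence(struct spdk_io_channel *ch, struct spdk_accel_sequence *seq)
{
	struct spdk_accel_task *task = spdk_accel_sequence_first_task(seq);

	if (task == NULL || !example_hw_supports(task->op_code)) {
		/* Nothing we can offload, let the framework execute the tasks */
		spdk_accel_sequence_continue(seq);
		return 0;
	}

	/* The HW completion path is expected to complete each task (e.g. via
	 * spdk_accel_task_complete()) and then call spdk_accel_sequence_continue(). */
	return example_submit_to_hw(ch, task, seq);
}
#endif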
2246 
2247 static bool
2248 accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
2249 {
2250 	/* For now, just do a simple check that the iovec arrays are exactly the same */
2251 	if (iovacnt != iovbcnt) {
2252 		return false;
2253 	}
2254 
2255 	return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
2256 }
2257 
2258 static bool
2259 accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next)
2260 {
2261 	struct spdk_accel_task *prev;
2262 
2263 	switch (task->op_code) {
2264 	case SPDK_ACCEL_OPC_DECOMPRESS:
2265 	case SPDK_ACCEL_OPC_FILL:
2266 	case SPDK_ACCEL_OPC_ENCRYPT:
2267 	case SPDK_ACCEL_OPC_DECRYPT:
2268 	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
2269 	case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
2270 		if (task->dst_domain != next->src_domain) {
2271 			return false;
2272 		}
2273 		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
2274 					next->s.iovs, next->s.iovcnt)) {
2275 			return false;
2276 		}
2277 		task->d.iovs = next->d.iovs;
2278 		task->d.iovcnt = next->d.iovcnt;
2279 		task->dst_domain = next->dst_domain;
2280 		task->dst_domain_ctx = next->dst_domain_ctx;
2281 		break;
2282 	case SPDK_ACCEL_OPC_CRC32C:
2283 		/* crc32c is special, because it doesn't have a dst buffer */
2284 		if (task->src_domain != next->src_domain) {
2285 			return false;
2286 		}
2287 		if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt,
2288 					next->s.iovs, next->s.iovcnt)) {
2289 			return false;
2290 		}
2291 		/* We can only change crc32c's buffer if we can also change the previous task's buffer */
2292 		prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link);
2293 		if (prev == NULL) {
2294 			return false;
2295 		}
2296 		if (!accel_task_set_dstbuf(prev, next)) {
2297 			return false;
2298 		}
2299 		task->s.iovs = next->d.iovs;
2300 		task->s.iovcnt = next->d.iovcnt;
2301 		task->src_domain = next->dst_domain;
2302 		task->src_domain_ctx = next->dst_domain_ctx;
2303 		break;
2304 	default:
2305 		return false;
2306 	}
2307 
2308 	return true;
2309 }
2310 
2311 static void
2312 accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
2313 			   struct spdk_accel_task **next_task)
2314 {
2315 	struct spdk_accel_task *next = *next_task;
2316 
2317 	switch (task->op_code) {
2318 	case SPDK_ACCEL_OPC_COPY:
2319 		/* We only allow changing the src of operations that actually have a src, e.g. we
2320 		 * never do it for fill.  Theoretically, it is possible, but we'd have to be careful
2321 		 * to change the src of the operation after the fill (which in turn could also be a
2322 		 * fill).  So, for the sake of simplicity, skip these operations for now.
2323 		 */
2324 		if (next->op_code != SPDK_ACCEL_OPC_DECOMPRESS &&
2325 		    next->op_code != SPDK_ACCEL_OPC_COPY &&
2326 		    next->op_code != SPDK_ACCEL_OPC_ENCRYPT &&
2327 		    next->op_code != SPDK_ACCEL_OPC_DECRYPT &&
2328 		    next->op_code != SPDK_ACCEL_OPC_COPY_CRC32C &&
2329 		    next->op_code != SPDK_ACCEL_OPC_DIF_GENERATE_COPY &&
2330 		    next->op_code != SPDK_ACCEL_OPC_DIF_VERIFY_COPY) {
2331 			break;
2332 		}
2333 		if (task->dst_domain != next->src_domain) {
2334 			break;
2335 		}
2336 		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
2337 					next->s.iovs, next->s.iovcnt)) {
2338 			break;
2339 		}
2340 		next->s.iovs = task->s.iovs;
2341 		next->s.iovcnt = task->s.iovcnt;
2342 		next->src_domain = task->src_domain;
2343 		next->src_domain_ctx = task->src_domain_ctx;
2344 		accel_sequence_complete_task(seq, task);
2345 		break;
2346 	case SPDK_ACCEL_OPC_DECOMPRESS:
2347 	case SPDK_ACCEL_OPC_FILL:
2348 	case SPDK_ACCEL_OPC_ENCRYPT:
2349 	case SPDK_ACCEL_OPC_DECRYPT:
2350 	case SPDK_ACCEL_OPC_CRC32C:
2351 	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
2352 	case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
2353 		/* We can only merge tasks when one of them is a copy */
2354 		if (next->op_code != SPDK_ACCEL_OPC_COPY) {
2355 			break;
2356 		}
2357 		if (!accel_task_set_dstbuf(task, next)) {
2358 			break;
2359 		}
2360 		/* We're removing next from the task queue, so we need to update the caller's
2361 		 * next_task pointer to keep its TAILQ_FOREACH_SAFE() loop working correctly */
2362 		*next_task = TAILQ_NEXT(next, seq_link);
2363 		accel_sequence_complete_task(seq, next);
2364 		break;
2365 	default:
2366 		assert(0 && "bad opcode");
2367 		break;
2368 	}
2369 }
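
/*
 * Example of the merging above: a sequence of decompress(src -> tmp) followed
 * by copy(tmp -> dst) collapses into a single decompress(src -> dst), because
 * the copy's source exactly matches the decompress' destination.
 */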
2370 
2371 void
2372 spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
2373 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
2374 {
2375 	struct spdk_accel_task *task, *next;
2376 
2377 	/* Remove any copy operations, if possible */
2378 	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
2379 		if (next == NULL) {
2380 			break;
2381 		}
2382 		accel_sequence_merge_tasks(seq, task, &next);
2383 	}
2384 
2385 	seq->cb_fn = cb_fn;
2386 	seq->cb_arg = cb_arg;
2387 
2388 	accel_process_sequence(seq);
2389 }
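
/*
 * Illustrative sketch (not part of the library): building and finishing a
 * sequence, assuming the spdk_accel_append_copy() signature from spdk/accel.h.
 * Compiled out, as it's an example only.
 */
#if 0
static void
example_seq_done(void *cb_arg, int status)
{
	SPDK_NOTICELOG("sequence finished with status: %d\n", status);
}

static int
example_copy_sequence(struct spdk_io_channel *ch, struct iovec *dst, struct iovec *src)
{
	struct spdk_accel_sequence *seq = NULL;
	int rc;

	rc = spdk_accel_append_copy(&seq, ch, dst, 1, NULL, NULL, src, 1, NULL, NULL,
				    NULL, NULL);
	if (rc != 0) {
		spdk_accel_sequence_abort(seq);
		return rc;
	}

	spdk_accel_sequence_finish(seq, example_seq_done, NULL);
	return 0;
}
#endif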
2390 
2391 void
2392 spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
2393 {
2394 	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
2395 	struct spdk_accel_task *task;
2396 
2397 	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);
2398 
2399 	while (!TAILQ_EMPTY(&tasks)) {
2400 		task = TAILQ_FIRST(&tasks);
2401 		TAILQ_REMOVE(&tasks, task, seq_link);
2402 		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
2403 	}
2404 }
2405 
2406 void
2407 spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
2408 {
2409 	if (seq == NULL) {
2410 		return;
2411 	}
2412 
2413 	accel_sequence_complete_tasks(seq);
2414 	accel_sequence_put(seq);
2415 }
2416 
2417 struct spdk_memory_domain *
2418 spdk_accel_get_memory_domain(void)
2419 {
2420 	return g_accel_domain;
2421 }
2422 
2423 static struct spdk_accel_module_if *
2424 _module_find_by_name(const char *name)
2425 {
2426 	struct spdk_accel_module_if *accel_module = NULL;
2427 
2428 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2429 		if (strcmp(name, accel_module->name) == 0) {
2430 			break;
2431 		}
2432 	}
2433 
2434 	return accel_module;
2435 }
2436 
2437 static inline struct spdk_accel_crypto_key *
2438 _accel_crypto_key_get(const char *name)
2439 {
2440 	struct spdk_accel_crypto_key *key;
2441 
2442 	assert(spdk_spin_held(&g_keyring_spin));
2443 
2444 	TAILQ_FOREACH(key, &g_keyring, link) {
2445 		if (strcmp(name, key->param.key_name) == 0) {
2446 			return key;
2447 		}
2448 	}
2449 
2450 	return NULL;
2451 }
2452 
2453 static void
2454 accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
2455 {
2456 	if (key->param.hex_key) {
2457 		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
2458 		free(key->param.hex_key);
2459 	}
2460 	if (key->param.hex_key2) {
2461 		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
2462 		free(key->param.hex_key2);
2463 	}
2464 	free(key->param.tweak_mode);
2465 	free(key->param.key_name);
2466 	free(key->param.cipher);
2467 	if (key->key) {
2468 		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
2469 		free(key->key);
2470 	}
2471 	if (key->key2) {
2472 		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
2473 		free(key->key2);
2474 	}
2475 	free(key);
2476 }
2477 
2478 static void
2479 accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
2480 {
2481 	assert(key->module_if);
2482 	assert(key->module_if->crypto_key_deinit);
2483 
2484 	key->module_if->crypto_key_deinit(key);
2485 	accel_crypto_key_free_mem(key);
2486 }
2487 
2488 /*
2489  * This function mitigates a timing side channel that could be caused by using strcmp().
2490  * Please refer to the chapter "Mitigating Information Leakage Based on Variable Timing" in
2491  * the article [1] for more details.
2492  * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
2493  */
2494 static bool
2495 accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
2496 {
2497 	size_t i;
2498 	volatile size_t x = k1_len ^ k2_len;
2499 
2500 	for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
2501 		x |= k1[i] ^ k2[i];
2502 	}
2503 
2504 	return x == 0;
2505 }
2506 
2507 static const char *g_tweak_modes[] = {
2508 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA] = "SIMPLE_LBA",
2509 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_JOIN_NEG_LBA_WITH_LBA] = "JOIN_NEG_LBA_WITH_LBA",
2510 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_FULL_LBA] = "INCR_512_FULL_LBA",
2511 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_UPPER_LBA] = "INCR_512_UPPER_LBA",
2512 };
2513 
2514 static const char *g_ciphers[] = {
2515 	[SPDK_ACCEL_CIPHER_AES_CBC] = "AES_CBC",
2516 	[SPDK_ACCEL_CIPHER_AES_XTS] = "AES_XTS",
2517 };
2518 
2519 int
2520 spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
2521 {
2522 	struct spdk_accel_module_if *module;
2523 	struct spdk_accel_crypto_key *key;
2524 	size_t hex_key_size, hex_key2_size;
2525 	bool found = false;
2526 	size_t i;
2527 	int rc;
2528 
2529 	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
2530 		return -EINVAL;
2531 	}
2532 
2533 	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
2534 		/* This should be nearly impossible, but check and warn the user just in case */
2535 		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
2536 	}
2537 	module = g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module;
2538 
2539 	if (!module) {
2540 		SPDK_ERRLOG("No accel module found assigned for crypto operation\n");
2541 		return -ENOENT;
2542 	}
2543 
2544 	if (!module->crypto_key_init || !module->crypto_supports_cipher) {
2545 		SPDK_ERRLOG("Module %s doesn't support crypto operations\n", module->name);
2546 		return -ENOTSUP;
2547 	}
2548 
2549 	key = calloc(1, sizeof(*key));
2550 	if (!key) {
2551 		return -ENOMEM;
2552 	}
2553 
2554 	key->param.key_name = strdup(param->key_name);
2555 	if (!key->param.key_name) {
2556 		rc = -ENOMEM;
2557 		goto error;
2558 	}
2559 
2560 	for (i = 0; i < SPDK_COUNTOF(g_ciphers); ++i) {
2561 		assert(g_ciphers[i]);
2562 
2563 		if (strncmp(param->cipher, g_ciphers[i], strlen(g_ciphers[i])) == 0) {
2564 			key->cipher = i;
2565 			found = true;
2566 			break;
2567 		}
2568 	}
2569 
2570 	if (!found) {
2571 		SPDK_ERRLOG("Failed to parse cipher\n");
2572 		rc = -EINVAL;
2573 		goto error;
2574 	}
2575 
2576 	key->param.cipher = strdup(param->cipher);
2577 	if (!key->param.cipher) {
2578 		rc = -ENOMEM;
2579 		goto error;
2580 	}
2581 
2582 	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2583 	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
2584 		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2585 		rc = -EINVAL;
2586 		goto error;
2587 	}
2588 
2589 	if (hex_key_size == 0) {
2590 		SPDK_ERRLOG("key1 size cannot be 0\n");
2591 		rc = -EINVAL;
2592 		goto error;
2593 	}
2594 
2595 	key->param.hex_key = strdup(param->hex_key);
2596 	if (!key->param.hex_key) {
2597 		rc = -ENOMEM;
2598 		goto error;
2599 	}
2600 
2601 	key->key_size = hex_key_size / 2;
2602 	key->key = spdk_unhexlify(key->param.hex_key);
2603 	if (!key->key) {
2604 		SPDK_ERRLOG("Failed to unhexlify key1\n");
2605 		rc = -EINVAL;
2606 		goto error;
2607 	}
2608 
2609 	if (param->hex_key2) {
2610 		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2611 		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
2612 			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2613 			rc = -EINVAL;
2614 			goto error;
2615 		}
2616 
2617 		if (hex_key2_size == 0) {
2618 			SPDK_ERRLOG("key2 size cannot be 0\n");
2619 			rc = -EINVAL;
2620 			goto error;
2621 		}
2622 
2623 		key->param.hex_key2 = strdup(param->hex_key2);
2624 		if (!key->param.hex_key2) {
2625 			rc = -ENOMEM;
2626 			goto error;
2627 		}
2628 
2629 		key->key2_size = hex_key2_size / 2;
2630 		key->key2 = spdk_unhexlify(key->param.hex_key2);
2631 		if (!key->key2) {
2632 			SPDK_ERRLOG("Failed to unhexlify key2\n");
2633 			rc = -EINVAL;
2634 			goto error;
2635 		}
2636 	}
2637 
2638 	key->tweak_mode = ACCEL_CRYPTO_TWEAK_MODE_DEFAULT;
2639 	if (param->tweak_mode) {
2640 		found = false;
2641 
2642 		key->param.tweak_mode = strdup(param->tweak_mode);
2643 		if (!key->param.tweak_mode) {
2644 			rc = -ENOMEM;
2645 			goto error;
2646 		}
2647 
2648 		for (i = 0; i < SPDK_COUNTOF(g_tweak_modes); ++i) {
2649 			assert(g_tweak_modes[i]);
2650 
2651 			if (strncmp(param->tweak_mode, g_tweak_modes[i], strlen(g_tweak_modes[i])) == 0) {
2652 				key->tweak_mode = i;
2653 				found = true;
2654 				break;
2655 			}
2656 		}
2657 
2658 		if (!found) {
2659 			SPDK_ERRLOG("Failed to parse tweak mode\n");
2660 			rc = -EINVAL;
2661 			goto error;
2662 		}
2663 	}
2664 
2665 	if ((!module->crypto_supports_tweak_mode && key->tweak_mode != ACCEL_CRYPTO_TWEAK_MODE_DEFAULT) ||
2666 	    (module->crypto_supports_tweak_mode && !module->crypto_supports_tweak_mode(key->tweak_mode))) {
2667 		SPDK_ERRLOG("Module %s doesn't support %s tweak mode\n", module->name,
2668 			    g_tweak_modes[key->tweak_mode]);
2669 		rc = -EINVAL;
2670 		goto error;
2671 	}
2672 
2673 	if (!module->crypto_supports_cipher(key->cipher, key->key_size)) {
2674 		SPDK_ERRLOG("Module %s doesn't support %s cipher with %zu key size\n", module->name,
2675 			    g_ciphers[key->cipher], key->key_size);
2676 		rc = -EINVAL;
2677 		goto error;
2678 	}
2679 
2680 	if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) {
2681 		if (!key->key2) {
2682 			SPDK_ERRLOG("%s key2 is missing\n", g_ciphers[key->cipher]);
2683 			rc = -EINVAL;
2684 			goto error;
2685 		}
2686 
2687 		if (key->key_size != key->key2_size) {
2688 			SPDK_ERRLOG("%s key size %zu is not equal to key2 size %zu\n", g_ciphers[key->cipher],
2689 				    key->key_size,
2690 				    key->key2_size);
2691 			rc = -EINVAL;
2692 			goto error;
2693 		}
2694 
2695 		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
2696 			SPDK_ERRLOG("%s identical keys are not secure\n", g_ciphers[key->cipher]);
2697 			rc = -EINVAL;
2698 			goto error;
2699 		}
2700 	}
2701 
2702 	if (key->cipher == SPDK_ACCEL_CIPHER_AES_CBC) {
2703 		if (key->key2_size) {
2704 			SPDK_ERRLOG("%s doesn't use key2\n", g_ciphers[key->cipher]);
2705 			rc = -EINVAL;
2706 			goto error;
2707 		}
2708 	}
2709 
2710 	key->module_if = module;
2711 
2712 	spdk_spin_lock(&g_keyring_spin);
2713 	if (_accel_crypto_key_get(param->key_name)) {
2714 		rc = -EEXIST;
2715 	} else {
2716 		rc = module->crypto_key_init(key);
2717 		if (rc) {
2718 			SPDK_ERRLOG("Module %s failed to initialize crypto key\n", module->name);
2719 		} else {
2720 			TAILQ_INSERT_TAIL(&g_keyring, key, link);
2721 		}
2722 	}
2723 	spdk_spin_unlock(&g_keyring_spin);
2724 
2725 	if (rc) {
2726 		goto error;
2727 	}
2728 
2729 	return 0;
2730 
2731 error:
2732 	accel_crypto_key_free_mem(key);
2733 	return rc;
2734 }
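
/*
 * Illustrative sketch (not part of the library): creating an AES_XTS key.
 * The hex strings below are placeholders; AES_XTS requires two keys of equal
 * size that are not identical.  Compiled out, as it's an example only.
 */
#if 0
static int
example_create_xts_key(void)
{
	struct spdk_accel_crypto_key_create_param param = {
		.cipher = "AES_XTS",
		.hex_key = "00112233445566778899aabbccddeeff",	/* placeholder 128-bit key */
		.hex_key2 = "ffeeddccbbaa99887766554433221100",	/* must differ from hex_key */
		.tweak_mode = "SIMPLE_LBA",			/* optional; this is the default */
		.key_name = "example_key",
	};

	return spdk_accel_crypto_key_create(&param);
}
#endif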
2735 
2736 int
2737 spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
2738 {
2739 	if (!key || !key->module_if) {
2740 		return -EINVAL;
2741 	}
2742 
2743 	spdk_spin_lock(&g_keyring_spin);
2744 	if (!_accel_crypto_key_get(key->param.key_name)) {
2745 		spdk_spin_unlock(&g_keyring_spin);
2746 		return -ENOENT;
2747 	}
2748 	TAILQ_REMOVE(&g_keyring, key, link);
2749 	spdk_spin_unlock(&g_keyring_spin);
2750 
2751 	accel_crypto_key_destroy_unsafe(key);
2752 
2753 	return 0;
2754 }
2755 
2756 struct spdk_accel_crypto_key *
2757 spdk_accel_crypto_key_get(const char *name)
2758 {
2759 	struct spdk_accel_crypto_key *key;
2760 
2761 	spdk_spin_lock(&g_keyring_spin);
2762 	key = _accel_crypto_key_get(name);
2763 	spdk_spin_unlock(&g_keyring_spin);
2764 
2765 	return key;
2766 }
2767 
2768 /* Helper function called when accel modules register with the framework. */
2769 void
2770 spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
2771 {
2772 	struct spdk_accel_module_if *tmp;
2773 
2774 	if (_module_find_by_name(accel_module->name)) {
2775 		SPDK_NOTICELOG("Module %s already registered\n", accel_module->name);
2776 		assert(false);
2777 		return;
2778 	}
2779 
2780 	TAILQ_FOREACH(tmp, &spdk_accel_module_list, tailq) {
2781 		if (accel_module->priority < tmp->priority) {
2782 			break;
2783 		}
2784 	}
2785 
2786 	if (tmp != NULL) {
2787 		TAILQ_INSERT_BEFORE(tmp, accel_module, tailq);
2788 	} else {
2789 		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
2790 	}
2791 }
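
/*
 * Illustrative sketch (not part of the library): modules normally register
 * themselves through the SPDK_ACCEL_MODULE_REGISTER() constructor macro from
 * spdk/accel_module.h, which calls spdk_accel_module_list_add().  The
 * example_* callbacks are hypothetical.  Compiled out, as it's an example only.
 */
#if 0
static struct spdk_accel_module_if g_example_module = {
	.module_init		= example_module_init,
	.name			= "example",
	.priority		= 1,	/* modules with higher priority are placed later in the list */
	.get_ctx_size		= example_get_ctx_size,
	.supports_opcode	= example_supports_opcode,
	.get_io_channel		= example_get_io_channel,
	.submit_tasks		= example_submit_tasks,
};
SPDK_ACCEL_MODULE_REGISTER(example, &g_example_module)
#endif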
2792 
2793 /* Framework level channel create callback. */
2794 static int
2795 accel_create_channel(void *io_device, void *ctx_buf)
2796 {
2797 	struct accel_io_channel	*accel_ch = ctx_buf;
2798 	struct spdk_accel_task *accel_task;
2799 	struct spdk_accel_task_aux_data *accel_task_aux;
2800 	struct spdk_accel_sequence *seq;
2801 	struct accel_buffer *buf;
2802 	size_t task_size_aligned;
2803 	uint8_t *task_mem;
2804 	uint32_t i = 0, j;
2805 	int rc;
2806 
2807 	task_size_aligned = SPDK_ALIGN_CEIL(g_max_accel_module_size, SPDK_CACHE_LINE_SIZE);
2808 	accel_ch->task_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
2809 				   g_opts.task_count * task_size_aligned);
2810 	if (!accel_ch->task_pool_base) {
2811 		return -ENOMEM;
2812 	}
2813 	memset(accel_ch->task_pool_base, 0, g_opts.task_count * task_size_aligned);
2814 
2815 	accel_ch->seq_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
2816 						g_opts.sequence_count * sizeof(struct spdk_accel_sequence));
2817 	if (accel_ch->seq_pool_base == NULL) {
2818 		goto err;
2819 	}
2820 	memset(accel_ch->seq_pool_base, 0, g_opts.sequence_count * sizeof(struct spdk_accel_sequence));
2821 
2822 	accel_ch->task_aux_data_base = calloc(g_opts.task_count, sizeof(struct spdk_accel_task_aux_data));
2823 	if (accel_ch->task_aux_data_base == NULL) {
2824 		goto err;
2825 	}
2826 
2827 	accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer));
2828 	if (accel_ch->buf_pool_base == NULL) {
2829 		goto err;
2830 	}
2831 
2832 	STAILQ_INIT(&accel_ch->task_pool);
2833 	SLIST_INIT(&accel_ch->task_aux_data_pool);
2834 	SLIST_INIT(&accel_ch->seq_pool);
2835 	SLIST_INIT(&accel_ch->buf_pool);
2836 
2837 	task_mem = accel_ch->task_pool_base;
2838 	for (i = 0; i < g_opts.task_count; i++) {
2839 		accel_task = (struct spdk_accel_task *)task_mem;
2840 		accel_task->aux = NULL;
2841 		STAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
2842 		task_mem += task_size_aligned;
2843 		accel_task_aux = &accel_ch->task_aux_data_base[i];
2844 		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task_aux, link);
2845 	}
2846 	for (i = 0; i < g_opts.sequence_count; i++) {
2847 		seq = &accel_ch->seq_pool_base[i];
2848 		SLIST_INSERT_HEAD(&accel_ch->seq_pool, seq, link);
2849 	}
2850 	for (i = 0; i < g_opts.buf_count; i++) {
2851 		buf = &accel_ch->buf_pool_base[i];
2852 		SLIST_INSERT_HEAD(&accel_ch->buf_pool, buf, link);
2853 	}
2854 
2855 	/* Assign modules and get IO channels for each */
2856 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
2857 		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
2858 		/* This can happen if idxd runs out of channels. */
2859 		if (accel_ch->module_ch[i] == NULL) {
2860 			SPDK_ERRLOG("Module %s failed to get io channel\n", g_modules_opc[i].module->name);
2861 			goto err;
2862 		}
2863 	}
2864 
2865 	if (g_accel_driver != NULL) {
2866 		accel_ch->driver_channel = g_accel_driver->get_io_channel();
2867 		if (accel_ch->driver_channel == NULL) {
2868 			SPDK_ERRLOG("Failed to get driver's IO channel\n");
2869 			goto err;
2870 		}
2871 	}
2872 
2873 	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size,
2874 				     g_opts.large_cache_size);
2875 	if (rc != 0) {
2876 		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
2877 		goto err;
2878 	}
2879 
2880 	return 0;
2881 err:
2882 	if (accel_ch->driver_channel != NULL) {
2883 		spdk_put_io_channel(accel_ch->driver_channel);
2884 	}
2885 	for (j = 0; j < i; j++) {
2886 		spdk_put_io_channel(accel_ch->module_ch[j]);
2887 	}
2888 	free(accel_ch->task_pool_base);
2889 	free(accel_ch->task_aux_data_base);
2890 	free(accel_ch->seq_pool_base);
2891 	free(accel_ch->buf_pool_base);
2892 
2893 	return -ENOMEM;
2894 }
2895 
2896 static void
2897 accel_add_stats(struct accel_stats *total, struct accel_stats *stats)
2898 {
2899 	int i;
2900 
2901 	total->sequence_executed += stats->sequence_executed;
2902 	total->sequence_failed += stats->sequence_failed;
2903 	total->sequence_outstanding += stats->sequence_outstanding;
2904 	total->task_outstanding += stats->task_outstanding;
2905 	total->retry.task += stats->retry.task;
2906 	total->retry.sequence += stats->retry.sequence;
2907 	total->retry.iobuf += stats->retry.iobuf;
2908 	total->retry.bufdesc += stats->retry.bufdesc;
2909 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; ++i) {
2910 		total->operations[i].executed += stats->operations[i].executed;
2911 		total->operations[i].failed += stats->operations[i].failed;
2912 		total->operations[i].num_bytes += stats->operations[i].num_bytes;
2913 	}
2914 }
2915 
2916 /* Framework level channel destroy callback. */
2917 static void
2918 accel_destroy_channel(void *io_device, void *ctx_buf)
2919 {
2920 	struct accel_io_channel	*accel_ch = ctx_buf;
2921 	int i;
2922 
2923 	spdk_iobuf_channel_fini(&accel_ch->iobuf);
2924 
2925 	if (accel_ch->driver_channel != NULL) {
2926 		spdk_put_io_channel(accel_ch->driver_channel);
2927 	}
2928 
2929 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
2930 		assert(accel_ch->module_ch[i] != NULL);
2931 		spdk_put_io_channel(accel_ch->module_ch[i]);
2932 		accel_ch->module_ch[i] = NULL;
2933 	}
2934 
2935 	/* Update global stats to make sure the channel's stats aren't lost after it is destroyed */
2936 	spdk_spin_lock(&g_stats_lock);
2937 	accel_add_stats(&g_stats, &accel_ch->stats);
2938 	spdk_spin_unlock(&g_stats_lock);
2939 
2940 	free(accel_ch->task_pool_base);
2941 	free(accel_ch->task_aux_data_base);
2942 	free(accel_ch->seq_pool_base);
2943 	free(accel_ch->buf_pool_base);
2944 }
2945 
2946 struct spdk_io_channel *
2947 spdk_accel_get_io_channel(void)
2948 {
2949 	return spdk_get_io_channel(&spdk_accel_module_list);
2950 }
2951 
2952 static int
2953 accel_module_initialize(void)
2954 {
2955 	struct spdk_accel_module_if *accel_module, *tmp_module;
2956 	int rc = 0, module_rc;
2957 
2958 	TAILQ_FOREACH_SAFE(accel_module, &spdk_accel_module_list, tailq, tmp_module) {
2959 		module_rc = accel_module->module_init();
2960 		if (module_rc) {
2961 			TAILQ_REMOVE(&spdk_accel_module_list, accel_module, tailq);
2962 			if (module_rc == -ENODEV) {
2963 				SPDK_NOTICELOG("No devices for module %s, skipping\n", accel_module->name);
2964 			} else if (!rc) {
2965 				SPDK_ERRLOG("Module %s initialization failed with %d\n", accel_module->name, module_rc);
2966 				rc = module_rc;
2967 			}
2968 			continue;
2969 		}
2970 
2971 		SPDK_DEBUGLOG(accel, "Module %s initialized.\n", accel_module->name);
2972 	}
2973 
2974 	return rc;
2975 }
2976 
2977 static void
2978 accel_module_init_opcode(enum spdk_accel_opcode opcode)
2979 {
2980 	struct accel_module *module = &g_modules_opc[opcode];
2981 	struct spdk_accel_module_if *module_if = module->module;
2982 
2983 	if (module_if->get_memory_domains != NULL) {
2984 		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
2985 	}
2986 }
2987 
2988 static int
2989 accel_memory_domain_translate(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
2990 			      struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
2991 			      void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
2992 {
2993 	struct accel_buffer *buf = src_domain_ctx;
2994 
2995 	SPDK_DEBUGLOG(accel, "translate addr %p, len %zu\n", addr, len);
2996 
2997 	assert(g_accel_domain == src_domain);
2998 	assert(spdk_memory_domain_get_system_domain() == dst_domain);
2999 	assert(buf->buf == NULL);
3000 	assert(addr == ACCEL_BUFFER_BASE);
3001 	assert(len == buf->len);
3002 
3003 	buf->buf = spdk_iobuf_get(&buf->ch->iobuf, buf->len, NULL, NULL);
3004 	if (spdk_unlikely(buf->buf == NULL)) {
3005 		return -ENOMEM;
3006 	}
3007 
3008 	result->iov_count = 1;
3009 	result->iov.iov_base = buf->buf;
3010 	result->iov.iov_len = buf->len;
3011 	SPDK_DEBUGLOG(accel, "translated addr %p\n", result->iov.iov_base);
3012 	return 0;
3013 }
3014 
3015 static void
3016 accel_memory_domain_invalidate(struct spdk_memory_domain *domain, void *domain_ctx,
3017 			       struct iovec *iov, uint32_t iovcnt)
3018 {
3019 	struct accel_buffer *buf = domain_ctx;
3020 
3021 	SPDK_DEBUGLOG(accel, "invalidate addr %p, len %zu\n", iov[0].iov_base, iov[0].iov_len);
3022 
3023 	assert(g_accel_domain == domain);
3024 	assert(iovcnt == 1);
3025 	assert(buf->buf != NULL);
3026 	assert(iov[0].iov_base == buf->buf);
3027 	assert(iov[0].iov_len == buf->len);
3028 
3029 	spdk_iobuf_put(&buf->ch->iobuf, buf->buf, buf->len);
3030 	buf->buf = NULL;
3031 }
3032 
3033 int
3034 spdk_accel_initialize(void)
3035 {
3036 	enum spdk_accel_opcode op;
3037 	struct spdk_accel_module_if *accel_module = NULL;
3038 	int rc;
3039 
3040 	/*
3041 	 * We need a unique identifier for the accel framework, so use the
3042 	 * spdk_accel_module_list address for this purpose.
3043 	 */
3044 	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
3045 				sizeof(struct accel_io_channel), "accel");
3046 
3047 	spdk_spin_init(&g_keyring_spin);
3048 	spdk_spin_init(&g_stats_lock);
3049 
3050 	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
3051 				       "SPDK_ACCEL_DMA_DEVICE");
3052 	if (rc != 0) {
3053 		SPDK_ERRLOG("Failed to create accel memory domain\n");
3054 		return rc;
3055 	}
3056 
3057 	spdk_memory_domain_set_translation(g_accel_domain, accel_memory_domain_translate);
3058 	spdk_memory_domain_set_invalidate(g_accel_domain, accel_memory_domain_invalidate);
3059 
3060 	g_modules_started = true;
3061 	rc = accel_module_initialize();
3062 	if (rc) {
3063 		return rc;
3064 	}
3065 
3066 	if (g_accel_driver != NULL && g_accel_driver->init != NULL) {
3067 		rc = g_accel_driver->init();
3068 		if (rc != 0) {
3069 			SPDK_ERRLOG("Failed to initialize driver %s: %s\n", g_accel_driver->name,
3070 				    spdk_strerror(-rc));
3071 			return rc;
3072 		}
3073 	}
3074 
3075 	/* The module list is ordered by priority, with the highest priority modules at the end
3076 	 * of the list.  The software module should be somewhere near the beginning of the list,
3077 	 * before all HW modules.
3078 	 * NOTE: all opcodes must be supported by software in the event that no HW modules are
3079 	 * initialized to support the operation.
3080 	 */
3081 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
3082 		for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
3083 			if (accel_module->supports_opcode(op)) {
3084 				g_modules_opc[op].module = accel_module;
3085 				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
3086 			}
3087 		}
3088 
3089 		if (accel_module->get_ctx_size != NULL) {
3090 			g_max_accel_module_size = spdk_max(g_max_accel_module_size,
3091 							   accel_module->get_ctx_size());
3092 		}
3093 	}
3094 
3095 	/* Now let's check for overrides and apply any that exist */
3096 	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
3097 		if (g_modules_opc_override[op] != NULL) {
3098 			accel_module = _module_find_by_name(g_modules_opc_override[op]);
3099 			if (accel_module == NULL) {
3100 				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
3101 				SPDK_ERRLOG("Invalid module name: %s\n", g_modules_opc_override[op]);
3102 			}
3103 			if (accel_module->supports_opcode(op) == false) {
3104 				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
3105 				return -EINVAL;
3106 			}
3107 			g_modules_opc[op].module = accel_module;
3108 		}
3109 	}
3110 
3111 	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
3112 		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations\n");
3113 		return -EINVAL;
3114 	}
3115 
3116 	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
3117 		assert(g_modules_opc[op].module != NULL);
3118 		accel_module_init_opcode(op);
3119 	}
3120 
3121 	rc = spdk_iobuf_register_module("accel");
3122 	if (rc != 0) {
3123 		SPDK_ERRLOG("Failed to register accel iobuf module\n");
3124 		return rc;
3125 	}
3126 
3127 	return 0;
3128 }
3129 
3130 static void
3131 accel_module_finish_cb(void)
3132 {
3133 	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;
3134 
3135 	cb_fn(g_fini_cb_arg);
3136 	g_fini_cb_fn = NULL;
3137 	g_fini_cb_arg = NULL;
3138 }
3139 
3140 static void
3141 accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
3142 			   const char *module_str)
3143 {
3144 	spdk_json_write_object_begin(w);
3145 	spdk_json_write_named_string(w, "method", "accel_assign_opc");
3146 	spdk_json_write_named_object_begin(w, "params");
3147 	spdk_json_write_named_string(w, "opname", opc_str);
3148 	spdk_json_write_named_string(w, "module", module_str);
3149 	spdk_json_write_object_end(w);
3150 	spdk_json_write_object_end(w);
3151 }
3152 
3153 static void
3154 __accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
3155 {
3156 	spdk_json_write_named_string(w, "name", key->param.key_name);
3157 	spdk_json_write_named_string(w, "cipher", key->param.cipher);
3158 	spdk_json_write_named_string(w, "key", key->param.hex_key);
3159 	if (key->param.hex_key2) {
3160 		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
3161 	}
3162 
3163 	if (key->param.tweak_mode) {
3164 		spdk_json_write_named_string(w, "tweak_mode", key->param.tweak_mode);
3165 	}
3166 }
3167 
3168 void
3169 _accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
3170 {
3171 	spdk_json_write_object_begin(w);
3172 	__accel_crypto_key_dump_param(w, key);
3173 	spdk_json_write_object_end(w);
3174 }
3175 
3176 static void
3177 _accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
3178 				    struct spdk_accel_crypto_key *key)
3179 {
3180 	spdk_json_write_object_begin(w);
3181 	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
3182 	spdk_json_write_named_object_begin(w, "params");
3183 	__accel_crypto_key_dump_param(w, key);
3184 	spdk_json_write_object_end(w);
3185 	spdk_json_write_object_end(w);
3186 }
3187 
3188 static void
3189 accel_write_options(struct spdk_json_write_ctx *w)
3190 {
3191 	spdk_json_write_object_begin(w);
3192 	spdk_json_write_named_string(w, "method", "accel_set_options");
3193 	spdk_json_write_named_object_begin(w, "params");
3194 	spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size);
3195 	spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size);
3196 	spdk_json_write_named_uint32(w, "task_count", g_opts.task_count);
3197 	spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count);
3198 	spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count);
3199 	spdk_json_write_object_end(w);
3200 	spdk_json_write_object_end(w);
3201 }
3202 
3203 static void
3204 _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
3205 {
3206 	struct spdk_accel_crypto_key *key;
3207 
3208 	spdk_spin_lock(&g_keyring_spin);
3209 	TAILQ_FOREACH(key, &g_keyring, link) {
3210 		if (full_dump) {
3211 			_accel_crypto_key_write_config_json(w, key);
3212 		} else {
3213 			_accel_crypto_key_dump_param(w, key);
3214 		}
3215 	}
3216 	spdk_spin_unlock(&g_keyring_spin);
3217 }
3218 
3219 void
3220 _accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
3221 {
3222 	_accel_crypto_keys_write_config_json(w, false);
3223 }
3224 
3225 void
3226 spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
3227 {
3228 	struct spdk_accel_module_if *accel_module;
3229 	int i;
3230 
3231 	spdk_json_write_array_begin(w);
3232 	accel_write_options(w);
3233 
3234 	if (g_accel_driver != NULL) {
3235 		spdk_json_write_object_begin(w);
3236 		spdk_json_write_named_string(w, "method", "accel_set_driver");
3237 		spdk_json_write_named_object_begin(w, "params");
3238 		spdk_json_write_named_string(w, "name", g_accel_driver->name);
3239 		spdk_json_write_object_end(w);
3240 		spdk_json_write_object_end(w);
3241 	}
3242 
3243 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
3244 		if (accel_module->write_config_json) {
3245 			accel_module->write_config_json(w);
3246 		}
3247 	}
3248 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
3249 		if (g_modules_opc_override[i]) {
3250 			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
3251 		}
3252 	}
3253 
3254 	_accel_crypto_keys_write_config_json(w, true);
3255 
3256 	spdk_json_write_array_end(w);
3257 }
3258 
3259 void
3260 spdk_accel_module_finish(void)
3261 {
3262 	if (!g_accel_module) {
3263 		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
3264 	} else {
3265 		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
3266 	}
3267 
3268 	if (!g_accel_module) {
3269 		if (g_accel_driver != NULL && g_accel_driver->fini != NULL) {
3270 			g_accel_driver->fini();
3271 		}
3272 
3273 		spdk_spin_destroy(&g_keyring_spin);
3274 		spdk_spin_destroy(&g_stats_lock);
3275 		if (g_accel_domain) {
3276 			spdk_memory_domain_destroy(g_accel_domain);
3277 			g_accel_domain = NULL;
3278 		}
3279 		accel_module_finish_cb();
3280 		return;
3281 	}
3282 
3283 	if (g_accel_module->module_fini) {
3284 		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
3285 	} else {
3286 		spdk_accel_module_finish();
3287 	}
3288 }
3289 
3290 static void
3291 accel_io_device_unregister_cb(void *io_device)
3292 {
3293 	struct spdk_accel_crypto_key *key, *key_tmp;
3294 	enum spdk_accel_opcode op;
3295 
3296 	spdk_spin_lock(&g_keyring_spin);
3297 	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
3298 		accel_crypto_key_destroy_unsafe(key);
3299 	}
3300 	spdk_spin_unlock(&g_keyring_spin);
3301 
3302 	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
3303 		if (g_modules_opc_override[op] != NULL) {
3304 			free(g_modules_opc_override[op]);
3305 			g_modules_opc_override[op] = NULL;
3306 		}
3307 		g_modules_opc[op].module = NULL;
3308 	}
3309 
3310 	spdk_accel_module_finish();
3311 }
3312 
3313 void
3314 spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
3315 {
3316 	assert(cb_fn != NULL);
3317 
3318 	g_fini_cb_fn = cb_fn;
3319 	g_fini_cb_arg = cb_arg;
3320 
3321 	spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb);
3322 }
3323 
3324 static struct spdk_accel_driver *
3325 accel_find_driver(const char *name)
3326 {
3327 	struct spdk_accel_driver *driver;
3328 
3329 	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
3330 		if (strcmp(driver->name, name) == 0) {
3331 			return driver;
3332 		}
3333 	}
3334 
3335 	return NULL;
3336 }
3337 
3338 int
3339 spdk_accel_set_driver(const char *name)
3340 {
3341 	struct spdk_accel_driver *driver = NULL;
3342 
3343 	if (name != NULL && name[0] != '\0') {
3344 		driver = accel_find_driver(name);
3345 		if (driver == NULL) {
3346 			SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
3347 			return -ENODEV;
3348 		}
3349 	}
3350 
3351 	g_accel_driver = driver;
3352 
3353 	return 0;
3354 }
3355 
3356 const char *
3357 spdk_accel_get_driver_name(void)
3358 {
3359 	if (!g_accel_driver) {
3360 		return NULL;
3361 	}
3362 
3363 	return g_accel_driver->name;
3364 }
3365 
3366 void
3367 spdk_accel_driver_register(struct spdk_accel_driver *driver)
3368 {
3369 	if (accel_find_driver(driver->name)) {
3370 		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
3371 		assert(0);
3372 		return;
3373 	}
3374 
3375 	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
3376 }
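
/*
 * Illustrative sketch (not part of the library): drivers typically register
 * through the SPDK_ACCEL_DRIVER_REGISTER() constructor macro from
 * spdk/accel_module.h rather than calling spdk_accel_driver_register()
 * directly.  The example_* callbacks are hypothetical.  Compiled out, as it's
 * an example only.
 */
#if 0
static struct spdk_accel_driver g_example_driver = {
	.name			= "example",
	.execute_sequence	= example_driver_execute_sequence,
	.get_io_channel		= example_driver_get_io_channel,
};
SPDK_ACCEL_DRIVER_REGISTER(example, &g_example_driver)
#endif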
3377 
3378 int
3379 spdk_accel_set_opts(const struct spdk_accel_opts *opts)
3380 {
3381 	if (!opts) {
3382 		SPDK_ERRLOG("opts cannot be NULL\n");
3383 		return -1;
3384 	}
3385 
3386 	if (!opts->opts_size) {
3387 		SPDK_ERRLOG("opts_size inside opts cannot be zero\n");
3388 		return -1;
3389 	}
3390 
3391 	if (SPDK_GET_FIELD(opts, task_count, g_opts.task_count,
3392 			   opts->opts_size) < ACCEL_TASKS_IN_SEQUENCE_LIMIT) {
3393 		return -EINVAL;
3394 	}
3395 
3396 #define SET_FIELD(field) \
3397         if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
3398                 g_opts.field = opts->field; \
3399         } \
3400 
3401 	SET_FIELD(small_cache_size);
3402 	SET_FIELD(large_cache_size);
3403 	SET_FIELD(task_count);
3404 	SET_FIELD(sequence_count);
3405 	SET_FIELD(buf_count);
3406 
3407 	g_opts.opts_size = opts->opts_size;
3408 
3409 #undef SET_FIELD
3410 
3411 	return 0;
3412 }
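
/*
 * Illustrative sketch (not part of the library): read-modify-write pattern
 * for tuning the accel options.  Reading the current values first keeps the
 * other fields at their defaults and leaves opts_size set correctly.
 * Compiled out, as it's an example only.
 */
#if 0
static int
example_tune_opts(void)
{
	struct spdk_accel_opts opts = {};

	spdk_accel_get_opts(&opts, sizeof(opts));
	opts.task_count = 4096;

	return spdk_accel_set_opts(&opts);
}
#endif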
3413 
3414 void
3415 spdk_accel_get_opts(struct spdk_accel_opts *opts, size_t opts_size)
3416 {
3417 	if (!opts) {
3418 		SPDK_ERRLOG("opts should not be NULL\n");
3419 		return;
3420 	}
3421 
3422 	if (!opts_size) {
3423 		SPDK_ERRLOG("opts_size should not be zero\n");
3424 		return;
3425 	}
3426 
3427 	opts->opts_size = opts_size;
3428 
3429 #define SET_FIELD(field) \
3430 	if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts_size) { \
3431 		opts->field = g_opts.field; \
3432 	} \
3433 
3434 	SET_FIELD(small_cache_size);
3435 	SET_FIELD(large_cache_size);
3436 	SET_FIELD(task_count);
3437 	SET_FIELD(sequence_count);
3438 	SET_FIELD(buf_count);
3439 
3440 #undef SET_FIELD
3441 
3442 	/* Do not remove this assert. Whenever a new field is added, update the
3443 	 * expected size below and add a matching SET_FIELD statement above. */
3444 	SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_opts) == 28, "Incorrect size");
3445 }
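
/* Illustrative usage sketch (not part of this file): only the fields that fit
 * within the supplied opts_size are copied out, which is what keeps this
 * interface compatible with callers compiled against an older, smaller struct.
 * The counters are assumed to be 32-bit here.
 *
 *	struct spdk_accel_opts opts;
 *
 *	spdk_accel_get_opts(&opts, sizeof(opts));
 *	printf("task_count: %u, buf_count: %u\n", opts.task_count, opts.buf_count);
 */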
3446 
3447 struct accel_get_stats_ctx {
3448 	struct accel_stats	stats;
3449 	accel_get_stats_cb	cb_fn;
3450 	void			*cb_arg;
3451 };
3452 
3453 static void
3454 accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
3455 {
3456 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
3457 
3458 	ctx->cb_fn(&ctx->stats, ctx->cb_arg);
3459 	free(ctx);
3460 }
3461 
3462 static void
3463 accel_get_channel_stats(struct spdk_io_channel_iter *iter)
3464 {
3465 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
3466 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
3467 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
3468 
3469 	accel_add_stats(&ctx->stats, &accel_ch->stats);
3470 	spdk_for_each_channel_continue(iter, 0);
3471 }
3472 
3473 int
3474 accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg)
3475 {
3476 	struct accel_get_stats_ctx *ctx;
3477 
3478 	ctx = calloc(1, sizeof(*ctx));
3479 	if (ctx == NULL) {
3480 		return -ENOMEM;
3481 	}
3482 
3483 	spdk_spin_lock(&g_stats_lock);
3484 	accel_add_stats(&ctx->stats, &g_stats);
3485 	spdk_spin_unlock(&g_stats_lock);
3486 
3487 	ctx->cb_fn = cb_fn;
3488 	ctx->cb_arg = cb_arg;
3489 
3490 	spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx,
3491 			      accel_get_channel_stats_done);
3492 
3493 	return 0;
3494 }
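
/* Illustrative sketch (not part of this file) of how a consumer, e.g. the RPC
 * layer, might use this internal helper; the callback name is hypothetical.
 * The callback receives the aggregate of the global stats plus every
 * channel's stats once the channel iteration completes.
 *
 *	static void
 *	dump_stats_done(struct accel_stats *stats, void *cb_arg)
 *	{
 *		...
 *	}
 *
 *	accel_get_stats(dump_stats_done, NULL);
 */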
3495 
3496 void
3497 spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum spdk_accel_opcode opcode,
3498 			    struct spdk_accel_opcode_stats *stats, size_t size)
3499 {
3500 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
3501 
3502 #define FIELD_OK(field) \
3503 	offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size
3504 
3505 #define SET_FIELD(field, value) \
3506 	if (FIELD_OK(field)) { \
3507 		stats->field = value; \
3508 	}
3509 
3510 	SET_FIELD(executed, accel_ch->stats.operations[opcode].executed);
3511 	SET_FIELD(failed, accel_ch->stats.operations[opcode].failed);
3512 	SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes);
3513 
3514 #undef FIELD_OK
3515 #undef SET_FIELD
3516 }
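
/* Illustrative usage sketch (not part of this file): querying the crc32c
 * counters of one channel, assuming 64-bit counter fields. Passing
 * sizeof(stats) lets the function fill every field the caller's struct
 * version knows about.
 *
 *	struct spdk_accel_opcode_stats stats = {};
 *
 *	spdk_accel_get_opcode_stats(ch, SPDK_ACCEL_OPC_CRC32C, &stats, sizeof(stats));
 *	printf("crc32c: executed=%" PRIu64 " failed=%" PRIu64 "\n",
 *	       stats.executed, stats.failed);
 */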
3517 
3518 uint8_t
3519 spdk_accel_get_buf_align(enum spdk_accel_opcode opcode,
3520 			 const struct spdk_accel_operation_exec_ctx *ctx)
3521 {
3522 	struct spdk_accel_module_if *module = g_modules_opc[opcode].module;
3523 	struct spdk_accel_opcode_info modinfo = {}, drvinfo = {};
3524 
3525 	if (g_accel_driver != NULL && g_accel_driver->get_operation_info != NULL) {
3526 		g_accel_driver->get_operation_info(opcode, ctx, &drvinfo);
3527 	}
3528 
3529 	if (module->get_operation_info != NULL) {
3530 		module->get_operation_info(opcode, ctx, &modinfo);
3531 	}
3532 
3533 	/* If a driver is set, it'll execute most of the operations, while the rest will usually
3534 	 * fall back to accel_sw, which doesn't have any alignment requirements.  However, to be
3535 	 * extra safe, return the max(driver, module) if a driver delegates some operations to a
3536 	 * hardware module. */
3537 	return spdk_max(modinfo.required_alignment, drvinfo.required_alignment);
3538 }
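
/* Illustrative usage sketch (not part of this file). The returned value is
 * treated here as a log2 alignment, an assumption consistent with the uint8_t
 * return type; zero means no alignment requirement.
 *
 *	uint8_t align = spdk_accel_get_buf_align(SPDK_ACCEL_OPC_ENCRYPT, ctx);
 *	void *buf = spdk_dma_zmalloc(len, align ? 1ull << align : 0, NULL);
 */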
3539 
3540 struct spdk_accel_module_if *
3541 spdk_accel_get_module(const char *name)
3542 {
3543 	struct spdk_accel_module_if *module;
3544 
3545 	TAILQ_FOREACH(module, &spdk_accel_module_list, tailq) {
3546 		if (strcmp(module->name, name) == 0) {
3547 			return module;
3548 		}
3549 	}
3550 
3551 	return NULL;
3552 }
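
/* Illustrative usage sketch (not part of this file); "software" is the name
 * the built-in software module is expected to register under.
 *
 *	struct spdk_accel_module_if *module = spdk_accel_get_module("software");
 *	if (module == NULL) {
 *		SPDK_ERRLOG("module not found\n");
 *	}
 */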
3553 
3554 int
3555 spdk_accel_get_opc_memory_domains(enum spdk_accel_opcode opcode,
3556 				  struct spdk_memory_domain **domains,
3557 				  int array_size)
3558 {
3559 	assert(opcode < SPDK_ACCEL_OPC_LAST);
3560 
3561 	if (g_modules_opc[opcode].module->get_memory_domains) {
3562 		return g_modules_opc[opcode].module->get_memory_domains(domains, array_size);
3563 	}
3564 
3565 	return 0;
3566 }
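
/* Illustrative usage sketch (not part of this file): asking the module that
 * handles copies which memory domains it supports. The return value is
 * assumed to be the number of domains written to the array.
 *
 *	struct spdk_memory_domain *domains[8];
 *	int rc;
 *
 *	rc = spdk_accel_get_opc_memory_domains(SPDK_ACCEL_OPC_COPY, domains, 8);
 *	if (rc > 0) {
 *		... up to rc domains were written to the array ...
 *	}
 */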
3567 
3568 SPDK_LOG_REGISTER_COMPONENT(accel)
3569