xref: /spdk/lib/accel/accel.c (revision a8d21b9b550dde7d3e7ffc0cd1171528a136165f)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2020 Intel Corporation.
3  *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
4  *   All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_internal/accel_module.h"
10 
11 #include "accel_internal.h"
12 
13 #include "spdk/dma.h"
14 #include "spdk/env.h"
15 #include "spdk/likely.h"
16 #include "spdk/log.h"
17 #include "spdk/thread.h"
18 #include "spdk/json.h"
19 #include "spdk/crc32.h"
20 #include "spdk/util.h"
21 #include "spdk/hexlify.h"
22 
23 /* Accelerator Framework: The following provides a top-level, generic
24  * API for the accelerator functions defined here.  Modules, such as
25  * the one in /module/accel/ioat, supply the implementations, with the
26  * exception of the pure software implementation contained later in
27  * this file.
28  */
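
/*
 * Illustrative usage sketch (not part of the original file): a caller
 * obtains an accel I/O channel and submits an operation against it.  The
 * callback name and the error handling shown here are hypothetical.
 *
 *	static void
 *	copy_done(void *cb_arg, int status)
 *	{
 *		// status is 0 on success, a negative errno otherwise
 *	}
 *
 *	struct spdk_io_channel *ch = spdk_accel_get_io_channel();
 *	int rc = spdk_accel_submit_copy(ch, dst, src, nbytes, 0, copy_done, NULL);
 *	if (rc == -ENOMEM) {
 *		// the per-channel task pool is exhausted; resubmit later
 *	}
 */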
29 
30 #define ALIGN_4K			0x1000
31 #define MAX_TASKS_PER_CHANNEL		0x800
32 #define ACCEL_SMALL_CACHE_SIZE		128
33 #define ACCEL_LARGE_CACHE_SIZE		16
34 /* Set MSB, so we don't return NULL pointers as buffers */
35 #define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
36 #define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)
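
/*
 * Worked example of the encoding above: a virtual accel buffer at offset
 * 0x10 is handed out to users as ACCEL_BUFFER_BASE + 0x10, i.e.
 * 0x8000000000000010.  Once the real data buffer has been allocated,
 * accel_update_buf() recovers the offset with
 * (uintptr_t)buf & ACCEL_BUFFER_OFFSET_MASK and rebases it onto the
 * actual allocation.
 */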
37 
38 struct accel_module {
39 	struct spdk_accel_module_if	*module;
40 	bool				supports_memory_domains;
41 };
42 
43 /* Largest context size for all accel modules */
44 static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);
45 
46 static struct spdk_accel_module_if *g_accel_module = NULL;
47 static spdk_accel_fini_cb g_fini_cb_fn = NULL;
48 static void *g_fini_cb_arg = NULL;
49 static bool g_modules_started = false;
50 static struct spdk_memory_domain *g_accel_domain;
51 
52 /* Global list of registered accelerator modules */
53 static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
54 	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);
55 
56 /* Crypto keyring */
57 static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
58 static struct spdk_spinlock g_keyring_spin;
59 
60 /* Global array mapping opcodes to the modules assigned to service them */
61 static struct accel_module g_modules_opc[ACCEL_OPC_LAST] = {};
62 static char *g_modules_opc_override[ACCEL_OPC_LAST] = {};
63 
64 static const char *g_opcode_strings[ACCEL_OPC_LAST] = {
65 	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
66 	"compress", "decompress", "encrypt", "decrypt"
67 };
68 
69 enum accel_sequence_state {
70 	ACCEL_SEQUENCE_STATE_INIT,
71 	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
72 	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
73 	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
74 	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
75 	ACCEL_SEQUENCE_STATE_PULL_DATA,
76 	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
77 	ACCEL_SEQUENCE_STATE_EXEC_TASK,
78 	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
79 	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
80 	ACCEL_SEQUENCE_STATE_NEXT_TASK,
81 	ACCEL_SEQUENCE_STATE_PUSH_DATA,
82 	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
83 	ACCEL_SEQUENCE_STATE_ERROR,
84 	ACCEL_SEQUENCE_STATE_MAX,
85 };
86 
87 static const char *g_seq_states[]
88 __attribute__((unused)) = {
89 	[ACCEL_SEQUENCE_STATE_INIT] = "init",
90 	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
91 	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
92 	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
93 	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
94 	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
95 	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
96 	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
97 	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
98 	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
99 	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
100 	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
101 	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
102 	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
103 	[ACCEL_SEQUENCE_STATE_MAX] = "",
104 };
105 
106 #define ACCEL_SEQUENCE_STATE_STRING(s) \
107 	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
108 	 ? g_seq_states[s] : "unknown")
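
/*
 * Typical flow through the states above, as driven by accel_process_sequence():
 *
 *   INIT -> CHECK_VIRTBUF [-> AWAIT_VIRTBUF] -> CHECK_BOUNCEBUF
 *        [-> AWAIT_BOUNCEBUF] [-> PULL_DATA -> AWAIT_PULL_DATA]
 *        -> EXEC_TASK -> AWAIT_TASK -> COMPLETE_TASK
 *        [-> PUSH_DATA -> AWAIT_PUSH_DATA] -> NEXT_TASK -> INIT | complete
 *
 * Bracketed states are only entered when a buffer has to be allocated
 * asynchronously or data has to be moved across memory domains.  Any failure
 * moves the sequence to ERROR, which completes it with a non-zero status.
 */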
109 
110 struct accel_buffer {
111 	struct spdk_accel_sequence	*seq;
112 	void				*buf;
113 	uint64_t			len;
114 	struct spdk_iobuf_entry		iobuf;
115 	TAILQ_ENTRY(accel_buffer)	link;
116 };
117 
118 struct accel_io_channel {
119 	struct spdk_io_channel			*module_ch[ACCEL_OPC_LAST];
120 	void					*task_pool_base;
121 	struct spdk_accel_sequence		*seq_pool_base;
122 	struct accel_buffer			*buf_pool_base;
123 	TAILQ_HEAD(, spdk_accel_task)		task_pool;
124 	TAILQ_HEAD(, spdk_accel_sequence)	seq_pool;
125 	TAILQ_HEAD(, accel_buffer)		buf_pool;
126 	struct spdk_iobuf_channel		iobuf;
127 };
128 
129 TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);
130 
131 struct spdk_accel_sequence {
132 	struct accel_io_channel			*ch;
133 	struct accel_sequence_tasks		tasks;
134 	struct accel_sequence_tasks		completed;
135 	TAILQ_HEAD(, accel_buffer)		bounce_bufs;
136 	enum accel_sequence_state		state;
137 	int					status;
138 	bool					in_process_sequence;
139 	spdk_accel_completion_cb		cb_fn;
140 	void					*cb_arg;
141 	TAILQ_ENTRY(spdk_accel_sequence)	link;
142 };
143 
144 static inline void
145 accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
146 {
147 	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
148 		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
149 	seq->state = state;
150 }
151 
152 static void
153 accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
154 {
155 	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
156 	assert(status != 0);
157 	seq->status = status;
158 }
159 
160 int
161 spdk_accel_get_opc_module_name(enum accel_opcode opcode, const char **module_name)
162 {
163 	if (opcode >= ACCEL_OPC_LAST) {
164 		/* invalid opcode */
165 		return -EINVAL;
166 	}
167 
168 	if (g_modules_opc[opcode].module) {
169 		*module_name = g_modules_opc[opcode].module->name;
170 	} else {
171 		return -ENOENT;
172 	}
173 
174 	return 0;
175 }
176 
177 void
178 _accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
179 {
180 	struct spdk_accel_module_if *accel_module;
181 	enum accel_opcode opcode;
182 	int j = 0;
183 
184 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
185 		for (opcode = 0; opcode < ACCEL_OPC_LAST; opcode++) {
186 			if (accel_module->supports_opcode(opcode)) {
187 				info->ops[j] = opcode;
188 				j++;
189 			}
190 		}
191 		info->name = accel_module->name;
192 		info->num_ops = j;
193 		fn(info);
194 		j = 0;
195 	}
196 }
197 
198 int
199 _accel_get_opc_name(enum accel_opcode opcode, const char **opcode_name)
200 {
201 	int rc = 0;
202 
203 	if (opcode < ACCEL_OPC_LAST) {
204 		*opcode_name = g_opcode_strings[opcode];
205 	} else {
206 		/* invalid opcode */
207 		rc = -EINVAL;
208 	}
209 
210 	return rc;
211 }
212 
213 int
214 spdk_accel_assign_opc(enum accel_opcode opcode, const char *name)
215 {
216 	if (g_modules_started == true) {
217 		/* we don't allow re-assignment once things have started */
218 		return -EINVAL;
219 	}
220 
221 	if (opcode >= ACCEL_OPC_LAST) {
222 		/* invalid opcode */
223 		return -EINVAL;
224 	}
225 
226 	/* module selection will be validated after the framework starts. */
227 	g_modules_opc_override[opcode] = strdup(name);
	if (g_modules_opc_override[opcode] == NULL) {
		return -ENOMEM;
	}
228 
229 	return 0;
230 }
231 
232 void
233 spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
234 {
235 	struct accel_io_channel *accel_ch = accel_task->accel_ch;
236 	spdk_accel_completion_cb	cb_fn = accel_task->cb_fn;
237 	void				*cb_arg = accel_task->cb_arg;
238 
239 	/* Return the task to the pool before invoking the callback, so that a
240 	 * recursive allocation of an accel_task inside the user's callback
241 	 * (cb_fn) cannot exhaust the task pool.
242 	 */
243 	TAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link);
244 
245 	cb_fn(cb_arg, status);
246 }
247 
248 inline static struct spdk_accel_task *
249 _get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
250 {
251 	struct spdk_accel_task *accel_task;
252 
253 	accel_task = TAILQ_FIRST(&accel_ch->task_pool);
254 	if (accel_task == NULL) {
255 		return NULL;
256 	}
257 
258 	TAILQ_REMOVE(&accel_ch->task_pool, accel_task, link);
259 	accel_task->link.tqe_next = NULL;
260 	accel_task->link.tqe_prev = NULL;
261 
262 	accel_task->cb_fn = cb_fn;
263 	accel_task->cb_arg = cb_arg;
264 	accel_task->accel_ch = accel_ch;
265 	accel_task->bounce.s.orig_iovs = NULL;
266 	accel_task->bounce.d.orig_iovs = NULL;
267 
268 	return accel_task;
269 }
270 
271 /* Accel framework public API for copy function */
272 int
273 spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
274 		       uint64_t nbytes, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
275 {
276 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
277 	struct spdk_accel_task *accel_task;
278 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COPY].module;
279 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COPY];
280 
281 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
282 	if (accel_task == NULL) {
283 		return -ENOMEM;
284 	}
285 
286 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
287 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
288 	accel_task->d.iovs[0].iov_base = dst;
289 	accel_task->d.iovs[0].iov_len = nbytes;
290 	accel_task->d.iovcnt = 1;
291 	accel_task->s.iovs[0].iov_base = src;
292 	accel_task->s.iovs[0].iov_len = nbytes;
293 	accel_task->s.iovcnt = 1;
294 	accel_task->op_code = ACCEL_OPC_COPY;
295 	accel_task->flags = flags;
296 	accel_task->src_domain = NULL;
297 	accel_task->dst_domain = NULL;
298 	accel_task->step_cb_fn = NULL;
299 
300 	return module->submit_tasks(module_ch, accel_task);
301 }
302 
303 /* Accel framework public API for dual cast copy function */
304 int
305 spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
306 			   void *dst2, void *src, uint64_t nbytes, int flags,
307 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
308 {
309 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
310 	struct spdk_accel_task *accel_task;
311 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_DUALCAST].module;
312 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_DUALCAST];
313 
314 	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
315 		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
316 		return -EINVAL;
317 	}
318 
319 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
320 	if (accel_task == NULL) {
321 		return -ENOMEM;
322 	}
323 
324 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
325 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
326 	accel_task->d2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST2];
327 	accel_task->d.iovs[0].iov_base = dst1;
328 	accel_task->d.iovs[0].iov_len = nbytes;
329 	accel_task->d.iovcnt = 1;
330 	accel_task->d2.iovs[0].iov_base = dst2;
331 	accel_task->d2.iovs[0].iov_len = nbytes;
332 	accel_task->d2.iovcnt = 1;
333 	accel_task->s.iovs[0].iov_base = src;
334 	accel_task->s.iovs[0].iov_len = nbytes;
335 	accel_task->s.iovcnt = 1;
336 	accel_task->flags = flags;
337 	accel_task->op_code = ACCEL_OPC_DUALCAST;
338 	accel_task->src_domain = NULL;
339 	accel_task->dst_domain = NULL;
340 	accel_task->step_cb_fn = NULL;
341 
342 	return module->submit_tasks(module_ch, accel_task);
343 }
344 
345 /* Accel framework public API for compare function */
346 int
347 spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
348 			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
349 			  void *cb_arg)
350 {
351 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
352 	struct spdk_accel_task *accel_task;
353 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COMPARE].module;
354 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COMPARE];
355 
356 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
357 	if (accel_task == NULL) {
358 		return -ENOMEM;
359 	}
360 
361 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
362 	accel_task->s2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC2];
363 	accel_task->s.iovs[0].iov_base = src1;
364 	accel_task->s.iovs[0].iov_len = nbytes;
365 	accel_task->s.iovcnt = 1;
366 	accel_task->s2.iovs[0].iov_base = src2;
367 	accel_task->s2.iovs[0].iov_len = nbytes;
368 	accel_task->s2.iovcnt = 1;
369 	accel_task->op_code = ACCEL_OPC_COMPARE;
370 	accel_task->src_domain = NULL;
371 	accel_task->dst_domain = NULL;
372 	accel_task->step_cb_fn = NULL;
373 
374 	return module->submit_tasks(module_ch, accel_task);
375 }
376 
377 /* Accel framework public API for fill function */
378 int
379 spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
380 		       uint8_t fill, uint64_t nbytes, int flags,
381 		       spdk_accel_completion_cb cb_fn, void *cb_arg)
382 {
383 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
384 	struct spdk_accel_task *accel_task;
385 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_FILL].module;
386 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_FILL];
387 
388 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
389 	if (accel_task == NULL) {
390 		return -ENOMEM;
391 	}
392 
393 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
394 	accel_task->d.iovs[0].iov_base = dst;
395 	accel_task->d.iovs[0].iov_len = nbytes;
396 	accel_task->d.iovcnt = 1;
397 	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
398 	accel_task->flags = flags;
399 	accel_task->op_code = ACCEL_OPC_FILL;
400 	accel_task->src_domain = NULL;
401 	accel_task->dst_domain = NULL;
402 	accel_task->step_cb_fn = NULL;
403 
404 	return module->submit_tasks(module_ch, accel_task);
405 }
406 
407 /* Accel framework public API for CRC-32C function */
408 int
409 spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
410 			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
411 			 void *cb_arg)
412 {
413 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
414 	struct spdk_accel_task *accel_task;
415 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_CRC32C].module;
416 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_CRC32C];
417 
418 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
419 	if (accel_task == NULL) {
420 		return -ENOMEM;
421 	}
422 
423 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
424 	accel_task->s.iovs[0].iov_base = src;
425 	accel_task->s.iovs[0].iov_len = nbytes;
426 	accel_task->s.iovcnt = 1;
427 	accel_task->crc_dst = crc_dst;
428 	accel_task->seed = seed;
429 	accel_task->op_code = ACCEL_OPC_CRC32C;
430 	accel_task->src_domain = NULL;
431 	accel_task->dst_domain = NULL;
432 	accel_task->step_cb_fn = NULL;
433 
434 	return module->submit_tasks(module_ch, accel_task);
435 }
436 
437 /* Accel framework public API for chained CRC-32C function */
438 int
439 spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
440 			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
441 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
442 {
443 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
444 	struct spdk_accel_task *accel_task;
445 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_CRC32C].module;
446 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_CRC32C];
447 
448 	if (iov == NULL) {
449 		SPDK_ERRLOG("iov should not be NULL\n");
450 		return -EINVAL;
451 	}
452 
453 	if (!iov_cnt) {
454 		SPDK_ERRLOG("iovcnt should not be zero\n");
455 		return -EINVAL;
456 	}
457 
458 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
459 	if (accel_task == NULL) {
460 		SPDK_ERRLOG("no memory\n");
462 		return -ENOMEM;
463 	}
464 
465 	accel_task->s.iovs = iov;
466 	accel_task->s.iovcnt = iov_cnt;
467 	accel_task->crc_dst = crc_dst;
468 	accel_task->seed = seed;
469 	accel_task->op_code = ACCEL_OPC_CRC32C;
470 	accel_task->src_domain = NULL;
471 	accel_task->dst_domain = NULL;
472 	accel_task->step_cb_fn = NULL;
473 
474 	return module->submit_tasks(module_ch, accel_task);
475 }
476 
477 /* Accel framework public API for copy with CRC-32C function */
478 int
479 spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
480 			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
481 			      int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
482 {
483 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
484 	struct spdk_accel_task *accel_task;
485 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COPY_CRC32C].module;
486 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COPY_CRC32C];
487 
488 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
489 	if (accel_task == NULL) {
490 		return -ENOMEM;
491 	}
492 
493 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
494 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
495 	accel_task->d.iovs[0].iov_base = dst;
496 	accel_task->d.iovs[0].iov_len = nbytes;
497 	accel_task->d.iovcnt = 1;
498 	accel_task->s.iovs[0].iov_base = src;
499 	accel_task->s.iovs[0].iov_len = nbytes;
500 	accel_task->s.iovcnt = 1;
501 	accel_task->crc_dst = crc_dst;
502 	accel_task->seed = seed;
503 	accel_task->flags = flags;
504 	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;
505 	accel_task->src_domain = NULL;
506 	accel_task->dst_domain = NULL;
507 	accel_task->step_cb_fn = NULL;
508 
509 	return module->submit_tasks(module_ch, accel_task);
510 }
511 
512 /* Accel framework public API for chained copy + CRC-32C function */
513 int
514 spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
515 			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
516 			       uint32_t seed, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
517 {
518 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
519 	struct spdk_accel_task *accel_task;
520 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COPY_CRC32C].module;
521 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COPY_CRC32C];
522 	uint64_t nbytes;
523 	uint32_t i;
524 
525 	if (src_iovs == NULL) {
526 		SPDK_ERRLOG("iov should not be NULL\n");
527 		return -EINVAL;
528 	}
529 
530 	if (!iov_cnt) {
531 		SPDK_ERRLOG("iovcnt should not be zero\n");
532 		return -EINVAL;
533 	}
534 
535 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
536 	if (accel_task == NULL) {
537 		SPDK_ERRLOG("no memory\n");
539 		return -ENOMEM;
540 	}
541 
542 	nbytes = 0;
543 	for (i = 0; i < iov_cnt; i++) {
544 		nbytes += src_iovs[i].iov_len;
545 	}
546 
547 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
548 	accel_task->d.iovs[0].iov_base = dst;
549 	accel_task->d.iovs[0].iov_len = nbytes;
550 	accel_task->d.iovcnt = 1;
551 	accel_task->s.iovs = src_iovs;
552 	accel_task->s.iovcnt = iov_cnt;
553 	accel_task->crc_dst = crc_dst;
554 	accel_task->seed = seed;
555 	accel_task->flags = flags;
556 	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;
557 	accel_task->src_domain = NULL;
558 	accel_task->dst_domain = NULL;
559 	accel_task->step_cb_fn = NULL;
560 
561 	return module->submit_tasks(module_ch, accel_task);
562 }
563 
564 int
565 spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
566 			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size, int flags,
567 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
568 {
569 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
570 	struct spdk_accel_task *accel_task;
571 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COMPRESS].module;
572 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COMPRESS];
573 
574 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
575 	if (accel_task == NULL) {
576 		return -ENOMEM;
577 	}
578 
579 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
580 	accel_task->d.iovs[0].iov_base = dst;
581 	accel_task->d.iovs[0].iov_len = nbytes;
582 	accel_task->d.iovcnt = 1;
583 	accel_task->output_size = output_size;
584 	accel_task->s.iovs = src_iovs;
585 	accel_task->s.iovcnt = src_iovcnt;
586 	accel_task->flags = flags;
587 	accel_task->op_code = ACCEL_OPC_COMPRESS;
588 	accel_task->src_domain = NULL;
589 	accel_task->dst_domain = NULL;
590 	accel_task->step_cb_fn = NULL;
591 
592 	return module->submit_tasks(module_ch, accel_task);
595 }
596 
597 int
598 spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
599 			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
600 			     int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
601 {
602 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
603 	struct spdk_accel_task *accel_task;
604 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_DECOMPRESS].module;
605 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_DECOMPRESS];
606 
607 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
608 	if (accel_task == NULL) {
609 		return -ENOMEM;
610 	}
611 
612 	accel_task->s.iovs = src_iovs;
613 	accel_task->s.iovcnt = src_iovcnt;
614 	accel_task->d.iovs = dst_iovs;
615 	accel_task->d.iovcnt = dst_iovcnt;
616 	accel_task->flags = flags;
617 	accel_task->op_code = ACCEL_OPC_DECOMPRESS;
618 	accel_task->src_domain = NULL;
619 	accel_task->dst_domain = NULL;
620 	accel_task->step_cb_fn = NULL;
621 
622 	return module->submit_tasks(module_ch, accel_task);
625 }
626 
627 int
628 spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
629 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
630 			  struct iovec *src_iovs, uint32_t src_iovcnt,
631 			  uint64_t iv, uint32_t block_size, int flags,
632 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
633 {
634 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
635 	struct spdk_accel_task *accel_task;
636 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_ENCRYPT].module;
637 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_ENCRYPT];
638 
639 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
640 		return -EINVAL;
641 	}
642 
643 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
644 	if (accel_task == NULL) {
645 		return -ENOMEM;
646 	}
647 
648 	accel_task->crypto_key = key;
649 	accel_task->s.iovs = src_iovs;
650 	accel_task->s.iovcnt = src_iovcnt;
651 	accel_task->d.iovs = dst_iovs;
652 	accel_task->d.iovcnt = dst_iovcnt;
653 	accel_task->iv = iv;
654 	accel_task->block_size = block_size;
655 	accel_task->flags = flags;
656 	accel_task->op_code = ACCEL_OPC_ENCRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;
657 
658 	return module->submit_tasks(module_ch, accel_task);
659 }
660 
661 int
662 spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
663 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
664 			  struct iovec *src_iovs, uint32_t src_iovcnt,
665 			  uint64_t iv, uint32_t block_size, int flags,
666 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
667 {
668 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
669 	struct spdk_accel_task *accel_task;
670 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_DECRYPT].module;
671 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_DECRYPT];
672 
673 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
674 		return -EINVAL;
675 	}
676 
677 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
678 	if (accel_task == NULL) {
679 		return -ENOMEM;
680 	}
681 
682 	accel_task->crypto_key = key;
683 	accel_task->s.iovs = src_iovs;
684 	accel_task->s.iovcnt = src_iovcnt;
685 	accel_task->d.iovs = dst_iovs;
686 	accel_task->d.iovcnt = dst_iovcnt;
687 	accel_task->iv = iv;
688 	accel_task->block_size = block_size;
689 	accel_task->flags = flags;
690 	accel_task->op_code = ACCEL_OPC_DECRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;
691 
692 	return module->submit_tasks(module_ch, accel_task);
693 }
694 
695 static inline struct accel_buffer *
696 accel_get_buf(struct accel_io_channel *ch, uint64_t len)
697 {
698 	struct accel_buffer *buf;
699 
700 	buf = TAILQ_FIRST(&ch->buf_pool);
701 	if (spdk_unlikely(buf == NULL)) {
702 		return NULL;
703 	}
704 
705 	TAILQ_REMOVE(&ch->buf_pool, buf, link);
706 	buf->len = len;
707 	buf->buf = NULL;
708 	buf->seq = NULL;
709 
710 	return buf;
711 }
712 
713 static inline void
714 accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
715 {
716 	if (buf->buf != NULL) {
717 		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
718 	}
719 
720 	TAILQ_INSERT_HEAD(&ch->buf_pool, buf, link);
721 }
722 
723 static inline struct spdk_accel_sequence *
724 accel_sequence_get(struct accel_io_channel *ch)
725 {
726 	struct spdk_accel_sequence *seq;
727 
728 	seq = TAILQ_FIRST(&ch->seq_pool);
729 	if (seq == NULL) {
730 		return NULL;
731 	}
732 
733 	TAILQ_REMOVE(&ch->seq_pool, seq, link);
734 
735 	TAILQ_INIT(&seq->tasks);
736 	TAILQ_INIT(&seq->completed);
737 	TAILQ_INIT(&seq->bounce_bufs);
738 
739 	seq->ch = ch;
740 	seq->status = 0;
741 	seq->state = ACCEL_SEQUENCE_STATE_INIT;
742 	seq->in_process_sequence = false;
743 
744 	return seq;
745 }
746 
747 static inline void
748 accel_sequence_put(struct spdk_accel_sequence *seq)
749 {
750 	struct accel_io_channel *ch = seq->ch;
751 	struct accel_buffer *buf;
752 
753 	while (!TAILQ_EMPTY(&seq->bounce_bufs)) {
754 		buf = TAILQ_FIRST(&seq->bounce_bufs);
755 		TAILQ_REMOVE(&seq->bounce_bufs, buf, link);
756 		accel_put_buf(seq->ch, buf);
757 	}
758 
759 	assert(TAILQ_EMPTY(&seq->tasks));
760 	assert(TAILQ_EMPTY(&seq->completed));
761 	seq->ch = NULL;
762 
763 	TAILQ_INSERT_HEAD(&ch->seq_pool, seq, link);
764 }
765 
766 static void accel_sequence_task_cb(void *cb_arg, int status);
767 
768 static inline struct spdk_accel_task *
769 accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
770 			spdk_accel_step_cb cb_fn, void *cb_arg)
771 {
772 	struct spdk_accel_task *task;
773 
774 	task = _get_task(ch, accel_sequence_task_cb, seq);
775 	if (task == NULL) {
776 		return task;
777 	}
778 
779 	task->step_cb_fn = cb_fn;
780 	task->step_cb_arg = cb_arg;
781 
782 	return task;
783 }
784 
785 int
786 spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
787 		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
788 		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
789 		       struct iovec *src_iovs, uint32_t src_iovcnt,
790 		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
791 		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
792 {
793 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
794 	struct spdk_accel_task *task;
795 	struct spdk_accel_sequence *seq = *pseq;
796 
797 	if (seq == NULL) {
798 		seq = accel_sequence_get(accel_ch);
799 		if (spdk_unlikely(seq == NULL)) {
800 			return -ENOMEM;
801 		}
802 	}
803 
804 	assert(seq->ch == accel_ch);
805 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
806 	if (spdk_unlikely(task == NULL)) {
807 		if (*pseq == NULL) {
808 			accel_sequence_put(seq);
809 		}
810 
811 		return -ENOMEM;
812 	}
813 
814 	task->dst_domain = dst_domain;
815 	task->dst_domain_ctx = dst_domain_ctx;
816 	task->d.iovs = dst_iovs;
817 	task->d.iovcnt = dst_iovcnt;
818 	task->src_domain = src_domain;
819 	task->src_domain_ctx = src_domain_ctx;
820 	task->s.iovs = src_iovs;
821 	task->s.iovcnt = src_iovcnt;
822 	task->flags = flags;
823 	task->op_code = ACCEL_OPC_COPY;
824 
825 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
826 	*pseq = seq;
827 
828 	return 0;
829 }
830 
831 int
832 spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
833 		       void *buf, uint64_t len,
834 		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
835 		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
836 {
837 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
838 	struct spdk_accel_task *task;
839 	struct spdk_accel_sequence *seq = *pseq;
840 
841 	if (seq == NULL) {
842 		seq = accel_sequence_get(accel_ch);
843 		if (spdk_unlikely(seq == NULL)) {
844 			return -ENOMEM;
845 		}
846 	}
847 
848 	assert(seq->ch == accel_ch);
849 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
850 	if (spdk_unlikely(task == NULL)) {
851 		if (*pseq == NULL) {
852 			accel_sequence_put(seq);
853 		}
854 
855 		return -ENOMEM;
856 	}
857 
858 	memset(&task->fill_pattern, pattern, sizeof(uint64_t));
859 
860 	task->d.iovs = &task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
861 	task->d.iovs[0].iov_base = buf;
862 	task->d.iovs[0].iov_len = len;
863 	task->d.iovcnt = 1;
864 	task->src_domain = NULL;
865 	task->dst_domain = domain;
866 	task->dst_domain_ctx = domain_ctx;
867 	task->flags = flags;
868 	task->op_code = ACCEL_OPC_FILL;
869 
870 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
871 	*pseq = seq;
872 
873 	return 0;
874 }
875 
876 int
877 spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
878 			     struct iovec *dst_iovs, size_t dst_iovcnt,
879 			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
880 			     struct iovec *src_iovs, size_t src_iovcnt,
881 			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
882 			     int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
883 {
884 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
885 	struct spdk_accel_task *task;
886 	struct spdk_accel_sequence *seq = *pseq;
887 
888 	if (seq == NULL) {
889 		seq = accel_sequence_get(accel_ch);
890 		if (spdk_unlikely(seq == NULL)) {
891 			return -ENOMEM;
892 		}
893 	}
894 
895 	assert(seq->ch == accel_ch);
896 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
897 	if (spdk_unlikely(task == NULL)) {
898 		if (*pseq == NULL) {
899 			accel_sequence_put(seq);
900 		}
901 
902 		return -ENOMEM;
903 	}
904 
905 	task->dst_domain = dst_domain;
906 	task->dst_domain_ctx = dst_domain_ctx;
907 	task->d.iovs = dst_iovs;
908 	task->d.iovcnt = dst_iovcnt;
909 	task->src_domain = src_domain;
910 	task->src_domain_ctx = src_domain_ctx;
911 	task->s.iovs = src_iovs;
912 	task->s.iovcnt = src_iovcnt;
913 	task->flags = flags;
914 	task->op_code = ACCEL_OPC_DECOMPRESS;
915 
916 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
917 	*pseq = seq;
918 
919 	return 0;
920 }
921 
922 int
923 spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
924 			  struct spdk_accel_crypto_key *key,
925 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
926 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
927 			  struct iovec *src_iovs, uint32_t src_iovcnt,
928 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
929 			  uint64_t iv, uint32_t block_size, int flags,
930 			  spdk_accel_step_cb cb_fn, void *cb_arg)
931 {
932 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
933 	struct spdk_accel_task *task;
934 	struct spdk_accel_sequence *seq = *pseq;
935 
936 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key ||
937 			  !block_size)) {
938 		return -EINVAL;
939 	}
940 
941 	if (seq == NULL) {
942 		seq = accel_sequence_get(accel_ch);
943 		if (spdk_unlikely(seq == NULL)) {
944 			return -ENOMEM;
945 		}
946 	}
947 
948 	assert(seq->ch == accel_ch);
949 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
950 	if (spdk_unlikely(task == NULL)) {
951 		if (*pseq == NULL) {
952 			accel_sequence_put(seq);
953 		}
954 
955 		return -ENOMEM;
956 	}
957 
958 	task->crypto_key = key;
959 	task->src_domain = src_domain;
960 	task->src_domain_ctx = src_domain_ctx;
961 	task->s.iovs = src_iovs;
962 	task->s.iovcnt = src_iovcnt;
963 	task->dst_domain = dst_domain;
964 	task->dst_domain_ctx = dst_domain_ctx;
965 	task->d.iovs = dst_iovs;
966 	task->d.iovcnt = dst_iovcnt;
967 	task->iv = iv;
968 	task->block_size = block_size;
969 	task->flags = flags;
970 	task->op_code = ACCEL_OPC_ENCRYPT;
971 
972 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
973 	*pseq = seq;
974 
975 	return 0;
976 }
977 
978 int
979 spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
980 			  struct spdk_accel_crypto_key *key,
981 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
982 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
983 			  struct iovec *src_iovs, uint32_t src_iovcnt,
984 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
985 			  uint64_t iv, uint32_t block_size, int flags,
986 			  spdk_accel_step_cb cb_fn, void *cb_arg)
987 {
988 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
989 	struct spdk_accel_task *task;
990 	struct spdk_accel_sequence *seq = *pseq;
991 
992 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key ||
993 			  !block_size)) {
994 		return -EINVAL;
995 	}
996 
997 	if (seq == NULL) {
998 		seq = accel_sequence_get(accel_ch);
999 		if (spdk_unlikely(seq == NULL)) {
1000 			return -ENOMEM;
1001 		}
1002 	}
1003 
1004 	assert(seq->ch == accel_ch);
1005 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1006 	if (spdk_unlikely(task == NULL)) {
1007 		if (*pseq == NULL) {
1008 			accel_sequence_put(seq);
1009 		}
1010 
1011 		return -ENOMEM;
1012 	}
1013 
1014 	task->crypto_key = key;
1015 	task->src_domain = src_domain;
1016 	task->src_domain_ctx = src_domain_ctx;
1017 	task->s.iovs = src_iovs;
1018 	task->s.iovcnt = src_iovcnt;
1019 	task->dst_domain = dst_domain;
1020 	task->dst_domain_ctx = dst_domain_ctx;
1021 	task->d.iovs = dst_iovs;
1022 	task->d.iovcnt = dst_iovcnt;
1023 	task->iv = iv;
1024 	task->block_size = block_size;
1025 	task->flags = flags;
1026 	task->op_code = ACCEL_OPC_DECRYPT;
1027 
1028 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1029 	*pseq = seq;
1030 
1031 	return 0;
1032 }
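
/*
 * Illustrative sketch of building and finishing a sequence (the callback
 * names are hypothetical): operations are only appended here; nothing
 * executes until spdk_accel_sequence_finish() is called.
 *
 *	struct spdk_accel_sequence *seq = NULL;
 *
 *	spdk_accel_append_fill(&seq, ch, buf, len, NULL, NULL, 0xa5, 0,
 *			       fill_step_done, NULL);
 *	spdk_accel_append_copy(&seq, ch, dst_iovs, dst_iovcnt, NULL, NULL,
 *			       src_iovs, src_iovcnt, NULL, NULL, 0,
 *			       copy_step_done, NULL);
 *	spdk_accel_sequence_finish(seq, seq_done, NULL);
 */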
1033 
1034 int
1035 spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
1036 		   struct spdk_memory_domain **domain, void **domain_ctx)
1037 {
1038 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1039 	struct accel_buffer *accel_buf;
1040 
1041 	accel_buf = accel_get_buf(accel_ch, len);
1042 	if (spdk_unlikely(accel_buf == NULL)) {
1043 		return -ENOMEM;
1044 	}
1045 
1046 	/* We always return the same pointer and identify the buffers through domain_ctx */
1047 	*buf = ACCEL_BUFFER_BASE;
1048 	*domain_ctx = accel_buf;
1049 	*domain = g_accel_domain;
1050 
1051 	return 0;
1052 }
1053 
1054 void
1055 spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
1056 		   struct spdk_memory_domain *domain, void *domain_ctx)
1057 {
1058 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1059 	struct accel_buffer *accel_buf = domain_ctx;
1060 
1061 	assert(domain == g_accel_domain);
1062 	assert(buf == ACCEL_BUFFER_BASE);
1063 
1064 	accel_put_buf(accel_ch, accel_buf);
1065 }
1066 
1067 static void
1068 accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
1069 {
1070 	struct spdk_accel_task *task;
1071 	struct accel_io_channel *ch = seq->ch;
1072 	spdk_accel_step_cb cb_fn;
1073 	void *cb_arg;
1074 
1075 	while (!TAILQ_EMPTY(&seq->completed)) {
1076 		task = TAILQ_FIRST(&seq->completed);
1077 		TAILQ_REMOVE(&seq->completed, task, seq_link);
1078 		cb_fn = task->step_cb_fn;
1079 		cb_arg = task->step_cb_arg;
1080 		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
1081 		if (cb_fn != NULL) {
1082 			cb_fn(cb_arg);
1083 		}
1084 	}
1085 
1086 	while (!TAILQ_EMPTY(&seq->tasks)) {
1087 		task = TAILQ_FIRST(&seq->tasks);
1088 		TAILQ_REMOVE(&seq->tasks, task, seq_link);
1089 		cb_fn = task->step_cb_fn;
1090 		cb_arg = task->step_cb_arg;
1091 		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
1092 		if (cb_fn != NULL) {
1093 			cb_fn(cb_arg);
1094 		}
1095 	}
1096 }
1097 
1098 static void
1099 accel_sequence_complete(struct spdk_accel_sequence *seq)
1100 {
1101 	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, seq->status);
1102 
1103 	/* First notify all users who appended operations to this sequence */
1104 	accel_sequence_complete_tasks(seq);
1105 
1106 	/* Then notify the user who finished the sequence */
1107 	seq->cb_fn(seq->cb_arg, seq->status);
1108 
1109 	accel_sequence_put(seq);
1110 }
1111 
1112 static void
1113 accel_update_buf(void **buf, struct accel_buffer *accel_buf)
1114 {
1115 	uintptr_t offset;
1116 
1117 	offset = (uintptr_t)(*buf) & ACCEL_BUFFER_OFFSET_MASK;
1118 	assert(offset < accel_buf->len);
1119 
1120 	*buf = (char *)accel_buf->buf + offset;
1121 }
1122 
1123 static void
1124 accel_update_iovs(struct iovec *iovs, uint32_t iovcnt, struct accel_buffer *buf)
1125 {
1126 	uint32_t i;
1127 
1128 	for (i = 0; i < iovcnt; ++i) {
1129 		accel_update_buf(&iovs[i].iov_base, buf);
1130 	}
1131 }
1132 
1133 static void
1134 accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
1135 {
1136 	struct spdk_accel_task *task;
1137 
1138 	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
1139 	 * in a sequence that were using it.
1140 	 */
1141 	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
1142 		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
1143 			accel_update_iovs(task->s.iovs, task->s.iovcnt, buf);
1144 			task->src_domain = NULL;
1145 		}
1146 		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
1147 			accel_update_iovs(task->d.iovs, task->d.iovcnt, buf);
1148 			task->dst_domain = NULL;
1149 		}
1150 	}
1151 }
1152 
1153 static void accel_process_sequence(struct spdk_accel_sequence *seq);
1154 
1155 static void
1156 accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
1157 {
1158 	struct accel_buffer *accel_buf;
1159 
1160 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1161 
1162 	assert(accel_buf->seq != NULL);
1163 	assert(accel_buf->buf == NULL);
1164 	accel_buf->buf = buf;
1165 
1166 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1167 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
1168 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1169 	accel_process_sequence(accel_buf->seq);
1170 }
1171 
1172 static bool
1173 accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
1174 			 spdk_iobuf_get_cb cb_fn)
1175 {
1176 	struct accel_io_channel *ch = seq->ch;
1177 
1178 	assert(buf->buf == NULL);
1179 	assert(buf->seq == NULL);
1180 
1181 	buf->seq = seq;
1182 	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
1183 	if (buf->buf == NULL) {
1184 		return false;
1185 	}
1186 
1187 	return true;
1188 }
1189 
1190 static bool
1191 accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1192 {
1193 	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
1194 	 * NULL */
1195 	if (task->src_domain == g_accel_domain) {
1196 		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
1197 					      accel_iobuf_get_virtbuf_cb)) {
1198 			return false;
1199 		}
1200 
1201 		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
1202 	}
1203 
1204 	if (task->dst_domain == g_accel_domain) {
1205 		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
1206 					      accel_iobuf_get_virtbuf_cb)) {
1207 			return false;
1208 		}
1209 
1210 		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
1211 	}
1212 
1213 	return true;
1214 }
1215 
1216 static inline uint64_t
1217 accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
1218 {
1219 	uint64_t result = 0;
1220 	uint32_t i;
1221 
1222 	for (i = 0; i < iovcnt; ++i) {
1223 		result += iovs[i].iov_len;
1224 	}
1225 
1226 	return result;
1227 }
1228 
1229 static inline void
1230 accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
1231 			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
1232 			struct accel_buffer *buf)
1233 {
1234 	bounce->orig_iovs = *iovs;
1235 	bounce->orig_iovcnt = *iovcnt;
1236 	bounce->orig_domain = *domain;
1237 	bounce->orig_domain_ctx = *domain_ctx;
1238 	bounce->iov.iov_base = buf->buf;
1239 	bounce->iov.iov_len = buf->len;
1240 
1241 	*iovs = &bounce->iov;
1242 	*iovcnt = 1;
1243 	*domain = NULL;
1244 }
1245 
1246 static void
1247 accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1248 {
1249 	struct spdk_accel_task *task;
1250 	struct accel_buffer *accel_buf;
1251 
1252 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1253 	assert(accel_buf->buf == NULL);
1254 	accel_buf->buf = buf;
1255 
1256 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
1257 	assert(task != NULL);
1258 
1259 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1260 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1261 	accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
1262 				&task->src_domain_ctx, accel_buf);
1263 	accel_process_sequence(accel_buf->seq);
1264 }
1265 
1266 static void
1267 accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1268 {
1269 	struct spdk_accel_task *task;
1270 	struct accel_buffer *accel_buf;
1271 
1272 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1273 	assert(accel_buf->buf == NULL);
1274 	accel_buf->buf = buf;
1275 
1276 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
1277 	assert(task != NULL);
1278 
1279 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1280 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1281 	accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
1282 				&task->dst_domain_ctx, accel_buf);
1283 	accel_process_sequence(accel_buf->seq);
1284 }
1285 
1286 static int
1287 accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1288 {
1289 	struct accel_buffer *buf;
1290 
1291 	if (task->src_domain != NULL) {
1292 		/* By the time we're here, accel buffers should have been allocated */
1293 		assert(task->src_domain != g_accel_domain);
1294 
1295 		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
1296 		if (buf == NULL) {
1297 			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
1298 			return -ENOMEM;
1299 		}
1300 
1301 		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
1302 		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
1303 			return -EAGAIN;
1304 		}
1305 
1306 		accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt,
1307 					&task->src_domain, &task->src_domain_ctx, buf);
1308 	}
1309 
1310 	if (task->dst_domain != NULL) {
1311 		/* By the time we're here, accel buffers should have been allocated */
1312 		assert(task->dst_domain != g_accel_domain);
1313 
1314 		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
1315 		if (buf == NULL) {
1316 			/* The src buffer will be released when a sequence is completed */
1317 			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
1318 			return -ENOMEM;
1319 		}
1320 
1321 		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
1322 		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
1323 			return -EAGAIN;
1324 		}
1325 
1326 		accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt,
1327 					&task->dst_domain, &task->dst_domain_ctx, buf);
1328 	}
1329 
1330 	return 0;
1331 }
1332 
1333 static void
1334 accel_task_pull_data_cb(void *ctx, int status)
1335 {
1336 	struct spdk_accel_sequence *seq = ctx;
1337 
1338 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1339 	if (spdk_likely(status == 0)) {
1340 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1341 	} else {
1342 		accel_sequence_set_fail(seq, status);
1343 	}
1344 
1345 	accel_process_sequence(seq);
1346 }
1347 
1348 static void
1349 accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1350 {
1351 	int rc;
1352 
1353 	assert(task->bounce.s.orig_iovs != NULL);
1354 	assert(task->bounce.s.orig_domain != NULL);
1355 	assert(task->bounce.s.orig_domain != g_accel_domain);
1356 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
1357 
1358 	rc = spdk_memory_domain_pull_data(task->bounce.s.orig_domain,
1359 					  task->bounce.s.orig_domain_ctx,
1360 					  task->bounce.s.orig_iovs, task->bounce.s.orig_iovcnt,
1361 					  task->s.iovs, task->s.iovcnt,
1362 					  accel_task_pull_data_cb, seq);
1363 	if (spdk_unlikely(rc != 0)) {
1364 		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
1365 			    spdk_memory_domain_get_dma_device_id(task->bounce.s.orig_domain), rc);
1366 		accel_sequence_set_fail(seq, rc);
1367 	}
1368 }
1369 
1370 static void
1371 accel_task_push_data_cb(void *ctx, int status)
1372 {
1373 	struct spdk_accel_sequence *seq = ctx;
1374 
1375 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
1376 	if (spdk_likely(status == 0)) {
1377 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
1378 	} else {
1379 		accel_sequence_set_fail(seq, status);
1380 	}
1381 
1382 	accel_process_sequence(seq);
1383 }
1384 
1385 static void
1386 accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1387 {
1388 	int rc;
1389 
1390 	assert(task->bounce.d.orig_iovs != NULL);
1391 	assert(task->bounce.d.orig_domain != NULL);
1392 	assert(task->bounce.d.orig_domain != g_accel_domain);
1393 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
1394 
1395 	rc = spdk_memory_domain_push_data(task->bounce.d.orig_domain,
1396 					  task->bounce.d.orig_domain_ctx,
1397 					  task->bounce.d.orig_iovs, task->bounce.d.orig_iovcnt,
1398 					  task->d.iovs, task->d.iovcnt,
1399 					  accel_task_push_data_cb, seq);
1400 	if (spdk_unlikely(rc != 0)) {
1401 		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
1402 			    spdk_memory_domain_get_dma_device_id(task->bounce.d.orig_domain), rc);
1403 		accel_sequence_set_fail(seq, rc);
1404 	}
1405 }
1406 
1407 static void
1408 accel_process_sequence(struct spdk_accel_sequence *seq)
1409 {
1410 	struct accel_io_channel *accel_ch = seq->ch;
1411 	struct spdk_accel_module_if *module;
1412 	struct spdk_io_channel *module_ch;
1413 	struct spdk_accel_task *task;
1414 	enum accel_sequence_state state;
1415 	int rc;
1416 
1417 	/* Prevent recursive calls to this function */
1418 	if (spdk_unlikely(seq->in_process_sequence)) {
1419 		return;
1420 	}
1421 	seq->in_process_sequence = true;
1422 
1423 	task = TAILQ_FIRST(&seq->tasks);
1424 	assert(task != NULL);
1425 
1426 	do {
1427 		state = seq->state;
1428 		switch (state) {
1429 		case ACCEL_SEQUENCE_STATE_INIT:
1430 		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
1431 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1432 			if (!accel_sequence_check_virtbuf(seq, task)) {
1433 				/* We couldn't allocate a buffer, wait until one is available */
1434 				break;
1435 			}
1436 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1437 		/* Fall through */
1438 		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
1439 			/* If a module supports memory domains, we don't need to allocate bounce
1440 			 * buffers */
1441 			if (g_modules_opc[task->op_code].supports_memory_domains) {
1442 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1443 				break;
1444 			}
1445 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1446 			rc = accel_sequence_check_bouncebuf(seq, task);
1447 			if (rc != 0) {
1448 				/* We couldn't allocate a buffer, wait until one is available */
1449 				if (rc == -EAGAIN) {
1450 					break;
1451 				}
1452 				accel_sequence_set_fail(seq, rc);
1453 				break;
1454 			}
1455 			if (task->bounce.s.orig_iovs != NULL) {
1456 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
1457 				break;
1458 			}
1459 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1460 		/* Fall through */
1461 		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
1462 			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
1463 				      g_opcode_strings[task->op_code], seq);
1464 
1465 			module = g_modules_opc[task->op_code].module;
1466 			module_ch = accel_ch->module_ch[task->op_code];
1467 
1468 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
1469 			rc = module->submit_tasks(module_ch, task);
1470 			if (spdk_unlikely(rc != 0)) {
1471 				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
1472 					    g_opcode_strings[task->op_code], seq);
1473 				accel_sequence_set_fail(seq, rc);
1474 			}
1475 			break;
1476 		case ACCEL_SEQUENCE_STATE_PULL_DATA:
1477 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1478 			accel_task_pull_data(seq, task);
1479 			break;
1480 		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
1481 			if (task->bounce.d.orig_iovs != NULL) {
1482 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
1483 				break;
1484 			}
1485 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
1486 			break;
1487 		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
1488 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
1489 			accel_task_push_data(seq, task);
1490 			break;
1491 		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
1492 			TAILQ_REMOVE(&seq->tasks, task, seq_link);
1493 			TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
1494 			/* Check if there are any remaining tasks */
1495 			task = TAILQ_FIRST(&seq->tasks);
1496 			if (task == NULL) {
1497 				/* Immediately return here to make sure we don't touch the sequence
1498 				 * after it's completed */
1499 				accel_sequence_complete(seq);
1500 				return;
1501 			}
1502 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
1503 			break;
1504 		case ACCEL_SEQUENCE_STATE_ERROR:
1505 			/* Immediately return here to make sure we don't touch the sequence
1506 			 * after it's completed */
1507 			assert(seq->status != 0);
1508 			accel_sequence_complete(seq);
1509 			return;
1510 		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
1511 		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
1512 		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
1513 		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
1514 		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
1515 			break;
1516 		default:
1517 			assert(0 && "bad state");
1518 			break;
1519 		}
1520 	} while (seq->state != state);
1521 
1522 	seq->in_process_sequence = false;
1523 }
1524 
1525 static void
1526 accel_sequence_task_cb(void *cb_arg, int status)
1527 {
1528 	struct spdk_accel_sequence *seq = cb_arg;
1529 	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);
1530 	struct accel_io_channel *accel_ch = seq->ch;
1531 
1532 	/* spdk_accel_task_complete() puts the task back into the task pool, but we don't want to do
1533 	 * that if a task is part of a sequence.  Removing the task from that pool here is the
1534 	 * easiest way to prevent this, even though it is a bit hacky.
1535 	 */
1536 	assert(task != NULL);
1537 	TAILQ_REMOVE(&accel_ch->task_pool, task, link);
1538 
1539 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_TASK);
1540 	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
1541 
1542 	if (spdk_unlikely(status != 0)) {
1543 		SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
1544 			    g_opcode_strings[task->op_code], seq);
1545 		accel_sequence_set_fail(seq, status);
1546 	}
1547 
1548 	accel_process_sequence(seq);
1549 }
1550 
1551 static bool
1552 accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
1553 {
1554 	/* For now, just do a naive check that the iovec arrays are exactly the same */
1555 	if (iovacnt != iovbcnt) {
1556 		return false;
1557 	}
1558 
1559 	return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
1560 }
1561 
1562 static void
1563 accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
1564 			   struct spdk_accel_task **next_task)
1565 {
1566 	struct spdk_accel_task *next = *next_task;
1567 
1568 	switch (task->op_code) {
1569 	case ACCEL_OPC_COPY:
1570 		/* We only allow changing the src of operations that actually have a src, e.g. we
1571 		 * never do it for fill.  Theoretically, it is possible, but we'd have to be careful
1572 		 * to change the src of the operation following the fill (which could itself also be
1573 		 * a fill).  So, for the sake of simplicity, skip such operations for now.
1574 		 */
1575 		if (next->op_code != ACCEL_OPC_DECOMPRESS &&
1576 		    next->op_code != ACCEL_OPC_COPY &&
1577 		    next->op_code != ACCEL_OPC_ENCRYPT &&
1578 		    next->op_code != ACCEL_OPC_DECRYPT) {
1579 			break;
1580 		}
1581 		if (task->dst_domain != next->src_domain) {
1582 			break;
1583 		}
1584 		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
1585 					next->s.iovs, next->s.iovcnt)) {
1586 			break;
1587 		}
1588 		next->s.iovs = task->s.iovs;
1589 		next->s.iovcnt = task->s.iovcnt;
1590 		next->src_domain = task->src_domain;
1591 		TAILQ_REMOVE(&seq->tasks, task, seq_link);
1592 		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
1593 		break;
1594 	case ACCEL_OPC_DECOMPRESS:
1595 	case ACCEL_OPC_FILL:
1596 	case ACCEL_OPC_ENCRYPT:
1597 	case ACCEL_OPC_DECRYPT:
1598 		/* We can only merge tasks when one of them is a copy */
1599 		if (next->op_code != ACCEL_OPC_COPY) {
1600 			break;
1601 		}
1602 		if (task->dst_domain != next->src_domain) {
1603 			break;
1604 		}
1605 		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
1606 					next->s.iovs, next->s.iovcnt)) {
1607 			break;
1608 		}
1609 		task->d.iovs = next->d.iovs;
1610 		task->d.iovcnt = next->d.iovcnt;
1611 		task->dst_domain = next->dst_domain;
1612 		/* We're removing next_task from the tasks queue, so we need to update its pointer,
1613 		 * so that the TAILQ_FOREACH_SAFE() loop below works correctly */
1614 		*next_task = TAILQ_NEXT(next, seq_link);
1615 		TAILQ_REMOVE(&seq->tasks, next, seq_link);
1616 		TAILQ_INSERT_TAIL(&seq->completed, next, seq_link);
1617 		break;
1618 	default:
1619 		assert(0 && "bad opcode");
1620 		break;
1621 	}
1622 }
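
/*
 * Worked example of the merge above: the pair "decompress A -> B, copy
 * B -> C" collapses into a single "decompress A -> C", provided the copy's
 * source exactly matches the decompress' destination (same iovecs, same
 * memory domain).  Symmetrically, "copy A -> B, decompress B -> C" becomes
 * "decompress A -> C" by rewriting the decompress' source.
 */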
1623 
1624 int
1625 spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
1626 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
1627 {
1628 	struct spdk_accel_task *task, *next;
1629 
1630 	/* Try to remove any copy operations if possible */
1631 	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
1632 		if (next == NULL) {
1633 			break;
1634 		}
1635 		accel_sequence_merge_tasks(seq, task, &next);
1636 	}
1637 
1638 	seq->cb_fn = cb_fn;
1639 	seq->cb_arg = cb_arg;
1640 
1641 	accel_process_sequence(seq);
1642 
1643 	return 0;
1644 }
1645 
1646 void
1647 spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
1648 {
1649 	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
1650 	struct spdk_accel_task *task;
1651 
1652 	assert(TAILQ_EMPTY(&seq->completed));
1653 	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);
1654 
1655 	while (!TAILQ_EMPTY(&tasks)) {
1656 		task = TAILQ_FIRST(&tasks);
1657 		TAILQ_REMOVE(&tasks, task, seq_link);
1658 		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
1659 	}
1660 }
1661 
1662 void
1663 spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
1664 {
1665 	if (seq == NULL) {
1666 		return;
1667 	}
1668 
1669 	accel_sequence_complete_tasks(seq);
1670 	accel_sequence_put(seq);
1671 }
1672 
1673 static struct spdk_accel_module_if *
1674 _module_find_by_name(const char *name)
1675 {
1676 	struct spdk_accel_module_if *accel_module = NULL;
1677 
1678 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
1679 		if (strcmp(name, accel_module->name) == 0) {
1680 			break;
1681 		}
1682 	}
1683 
1684 	return accel_module;
1685 }
1686 
1687 static inline struct spdk_accel_crypto_key *
1688 _accel_crypto_key_get(const char *name)
1689 {
1690 	struct spdk_accel_crypto_key *key;
1691 
1692 	assert(spdk_spin_held(&g_keyring_spin));
1693 
1694 	TAILQ_FOREACH(key, &g_keyring, link) {
1695 		if (strcmp(name, key->param.key_name) == 0) {
1696 			return key;
1697 		}
1698 	}
1699 
1700 	return NULL;
1701 }
1702 
1703 static void
1704 accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
1705 {
1706 	if (key->param.hex_key) {
1707 		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
1708 		free(key->param.hex_key);
1709 	}
1710 	if (key->param.hex_key2) {
1711 		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
1712 		free(key->param.hex_key2);
1713 	}
1714 	free(key->param.key_name);
1715 	free(key->param.cipher);
1716 	if (key->key) {
1717 		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
1718 		free(key->key);
1719 	}
1720 	if (key->key2) {
1721 		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
1722 		free(key->key2);
1723 	}
1724 	free(key);
1725 }
1726 
1727 static void
1728 accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
1729 {
1730 	assert(key->module_if);
1731 	assert(key->module_if->crypto_key_deinit);
1732 
1733 	key->module_if->crypto_key_deinit(key);
1734 	accel_crypto_key_free_mem(key);
1735 }
1736 
1737 /*
1738  * This function mitigates a timing side channel that could arise from using strcmp().
1739  * Please refer to the chapter "Mitigating Information Leakage Based on Variable Timing" in
1740  * the article [1] for more details.
1741  * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
1742  */
1743 static bool
1744 accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
1745 {
1746 	size_t i;
1747 	volatile size_t x = k1_len ^ k2_len;
1748 
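	/* Bitwise & in the loop condition and the |= accumulation avoid
	 * data-dependent branches, so the comparison runs in time that depends
	 * only on the key lengths, not on their contents. */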
1749 	for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
1750 		x |= k1[i] ^ k2[i];
1751 	}
1752 
1753 	return x == 0;
1754 }
1755 
1756 int
1757 spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
1758 {
1759 	struct spdk_accel_module_if *module;
1760 	struct spdk_accel_crypto_key *key;
1761 	size_t hex_key_size, hex_key2_size;
1762 	int rc;
1763 
1764 	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
1765 		return -EINVAL;
1766 	}
1767 
1768 	if (g_modules_opc[ACCEL_OPC_ENCRYPT].module != g_modules_opc[ACCEL_OPC_DECRYPT].module) {
1769 		/* This is unlikely, but check and warn the user just in case */
1770 		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
1771 	}
1772 	module = g_modules_opc[ACCEL_OPC_ENCRYPT].module;
1773 
1774 	if (!module) {
1775 		SPDK_ERRLOG("No accel module found assigned for crypto operation\n");
1776 		return -ENOENT;
1777 	}
1778 	if (!module->crypto_key_init) {
1779 		SPDK_ERRLOG("Accel module \"%s\" doesn't support crypto operations\n", module->name);
1780 		return -ENOTSUP;
1781 	}
1782 
1783 	key = calloc(1, sizeof(*key));
1784 	if (!key) {
1785 		return -ENOMEM;
1786 	}
1787 
1788 	key->param.key_name = strdup(param->key_name);
1789 	if (!key->param.key_name) {
1790 		rc = -ENOMEM;
1791 		goto error;
1792 	}
1793 
1794 	key->param.cipher = strdup(param->cipher);
1795 	if (!key->param.cipher) {
1796 		rc = -ENOMEM;
1797 		goto error;
1798 	}
1799 
1800 	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
1801 	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
1802 		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
1803 		rc = -EINVAL;
1804 		goto error;
1805 	}
1806 	key->param.hex_key = strdup(param->hex_key);
1807 	if (!key->param.hex_key) {
1808 		rc = -ENOMEM;
1809 		goto error;
1810 	}
1811 
1812 	key->key_size = hex_key_size / 2;
1813 	key->key = spdk_unhexlify(key->param.hex_key);
1814 	if (!key->key) {
1815 		SPDK_ERRLOG("Failed to unhexlify key1\n");
1816 		rc = -EINVAL;
1817 		goto error;
1818 	}
1819 
1820 	if (param->hex_key2) {
1821 		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
1822 		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
1823 			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
1824 			rc = -EINVAL;
1825 			goto error;
1826 		}
1827 		key->param.hex_key2 = strdup(param->hex_key2);
1828 		if (!key->param.hex_key2) {
1829 			rc = -ENOMEM;
1830 			goto error;
1831 		}
1832 
1833 		key->key2_size = hex_key2_size / 2;
1834 		key->key2 = spdk_unhexlify(key->param.hex_key2);
1835 		if (!key->key2) {
1836 			SPDK_ERRLOG("Failed to unhexlify key2\n");
1837 			rc = -EINVAL;
1838 			goto error;
1839 		}
1840 
1841 		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
1842 			SPDK_ERRLOG("Identical keys are not secure\n");
1843 			rc = -EINVAL;
1844 			goto error;
1845 		}
1846 	}
1847 
1848 	key->module_if = module;
1849 
1850 	spdk_spin_lock(&g_keyring_spin);
1851 	if (_accel_crypto_key_get(param->key_name)) {
1852 		rc = -EEXIST;
1853 	} else {
1854 		rc = module->crypto_key_init(key);
1855 		if (!rc) {
1856 			TAILQ_INSERT_TAIL(&g_keyring, key, link);
1857 		}
1858 	}
1859 	spdk_spin_unlock(&g_keyring_spin);
1860 
1861 	if (rc) {
1862 		goto error;
1863 	}
1864 
1865 	return 0;
1866 
1867 error:
1868 	accel_crypto_key_free_mem(key);
1869 	return rc;
1870 }
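
/*
 * A minimal sketch of creating a key, assuming the cipher is named "AES_XTS"
 * and using hypothetical hex strings and key name:
 *
 *	struct spdk_accel_crypto_key_create_param param = {
 *		.cipher = "AES_XTS",
 *		.hex_key = "00112233445566778899aabbccddeeff",
 *		.hex_key2 = "ffeeddccbbaa99887766554433221100",
 *		.key_name = "my_key",
 *	};
 *
 *	rc = spdk_accel_crypto_key_create(&param);
 */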
1871 
1872 int
1873 spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
1874 {
1875 	if (!key || !key->module_if) {
1876 		return -EINVAL;
1877 	}
1878 
1879 	spdk_spin_lock(&g_keyring_spin);
1880 	if (!_accel_crypto_key_get(key->param.key_name)) {
1881 		spdk_spin_unlock(&g_keyring_spin);
1882 		return -ENOENT;
1883 	}
1884 	TAILQ_REMOVE(&g_keyring, key, link);
1885 	spdk_spin_unlock(&g_keyring_spin);
1886 
1887 	accel_crypto_key_destroy_unsafe(key);
1888 
1889 	return 0;
1890 }
1891 
1892 struct spdk_accel_crypto_key *
1893 spdk_accel_crypto_key_get(const char *name)
1894 {
1895 	struct spdk_accel_crypto_key *key;
1896 
1897 	spdk_spin_lock(&g_keyring_spin);
1898 	key = _accel_crypto_key_get(name);
1899 	spdk_spin_unlock(&g_keyring_spin);
1900 
1901 	return key;
1902 }
1903 
1904 /* Helper function called when accel modules register with the framework. */
1905 void
1906 spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
1907 {
1908 	if (_module_find_by_name(accel_module->name)) {
1909 		SPDK_NOTICELOG("Accel module %s already registered\n", accel_module->name);
1910 		assert(false);
1911 		return;
1912 	}
1913 
1914 	/* Make sure that the software module is at the head of the list; this
1915 	 * ensures that all opcodes are first assigned to software and then
1916 	 * updated to HW modules as they are registered.
1917 	 */
1918 	if (strcmp(accel_module->name, "software") == 0) {
1919 		TAILQ_INSERT_HEAD(&spdk_accel_module_list, accel_module, tailq);
1920 	} else {
1921 		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
1922 	}
1923 
1924 	if (accel_module->get_ctx_size && accel_module->get_ctx_size() > g_max_accel_module_size) {
1925 		g_max_accel_module_size = accel_module->get_ctx_size();
1926 	}
1927 }
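
/*
 * A sketch of how a module typically plugs in, assuming the usual
 * SPDK_ACCEL_MODULE_REGISTER() constructor macro (fields abbreviated;
 * g_my_module and its callbacks are hypothetical):
 *
 *	static struct spdk_accel_module_if g_my_module = {
 *		.module_init     = my_module_init,
 *		.module_fini     = my_module_fini,
 *		.name            = "my_module",
 *		.get_ctx_size    = my_get_ctx_size,
 *		.supports_opcode = my_supports_opcode,
 *		.get_io_channel  = my_get_io_channel,
 *	};
 *	SPDK_ACCEL_MODULE_REGISTER(my_module, &g_my_module)
 *
 * The macro arranges for spdk_accel_module_list_add() to run at startup.
 */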
1928 
1929 /* Framework level channel create callback. */
1930 static int
1931 accel_create_channel(void *io_device, void *ctx_buf)
1932 {
1933 	struct accel_io_channel	*accel_ch = ctx_buf;
1934 	struct spdk_accel_task *accel_task;
1935 	struct spdk_accel_sequence *seq;
1936 	struct accel_buffer *buf;
1937 	uint8_t *task_mem;
1938 	int i = 0, j, rc;
1939 
1940 	accel_ch->task_pool_base = calloc(MAX_TASKS_PER_CHANNEL, g_max_accel_module_size);
1941 	if (accel_ch->task_pool_base == NULL) {
1942 		return -ENOMEM;
1943 	}
1944 
1945 	accel_ch->seq_pool_base = calloc(MAX_TASKS_PER_CHANNEL, sizeof(struct spdk_accel_sequence));
1946 	if (accel_ch->seq_pool_base == NULL) {
1947 		goto err;
1948 	}
1949 
1950 	accel_ch->buf_pool_base = calloc(MAX_TASKS_PER_CHANNEL, sizeof(struct accel_buffer));
1951 	if (accel_ch->buf_pool_base == NULL) {
1952 		goto err;
1953 	}
1954 
1955 	TAILQ_INIT(&accel_ch->task_pool);
1956 	TAILQ_INIT(&accel_ch->seq_pool);
1957 	TAILQ_INIT(&accel_ch->buf_pool);
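	/* Carve the flat task_pool_base allocation into MAX_TASKS_PER_CHANNEL
	 * tasks, each g_max_accel_module_size bytes so any module's context fits. */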
1958 	task_mem = accel_ch->task_pool_base;
1959 	for (i = 0 ; i < MAX_TASKS_PER_CHANNEL; i++) {
1960 		accel_task = (struct spdk_accel_task *)task_mem;
1961 		seq = &accel_ch->seq_pool_base[i];
1962 		buf = &accel_ch->buf_pool_base[i];
1963 		TAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
1964 		TAILQ_INSERT_TAIL(&accel_ch->seq_pool, seq, link);
1965 		TAILQ_INSERT_TAIL(&accel_ch->buf_pool, buf, link);
1966 		task_mem += g_max_accel_module_size;
1967 	}
1968 
1969 	/* Assign modules and get IO channels for each */
1970 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
1971 		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
1972 		/* This can happen if idxd runs out of channels. */
1973 		if (accel_ch->module_ch[i] == NULL) {
1974 			goto err;
1975 		}
1976 	}
1977 
1978 	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", ACCEL_SMALL_CACHE_SIZE,
1979 				     ACCEL_LARGE_CACHE_SIZE);
1980 	if (rc != 0) {
1981 		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
1982 		goto err;
1983 	}
1984 
1985 	return 0;
1986 err:
1987 	for (j = 0; j < i; j++) {
1988 		spdk_put_io_channel(accel_ch->module_ch[j]);
1989 	}
1990 	free(accel_ch->task_pool_base);
1991 	free(accel_ch->seq_pool_base);
1992 	free(accel_ch->buf_pool_base);
1993 	return -ENOMEM;
1994 }
1995 
1996 /* Framework level channel destroy callback. */
1997 static void
1998 accel_destroy_channel(void *io_device, void *ctx_buf)
1999 {
2000 	struct accel_io_channel	*accel_ch = ctx_buf;
2001 	int i;
2002 
2003 	spdk_iobuf_channel_fini(&accel_ch->iobuf);
2004 
2005 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
2006 		assert(accel_ch->module_ch[i] != NULL);
2007 		spdk_put_io_channel(accel_ch->module_ch[i]);
2008 		accel_ch->module_ch[i] = NULL;
2009 	}
2010 
2011 	free(accel_ch->task_pool_base);
2012 	free(accel_ch->seq_pool_base);
2013 	free(accel_ch->buf_pool_base);
2014 }
2015 
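/*
 * The accel framework's io_device is registered in spdk_accel_initialize()
 * using the address of spdk_accel_module_list as its unique identifier, so the
 * same address is used to look up the channel here.
 */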
2016 struct spdk_io_channel *
2017 spdk_accel_get_io_channel(void)
2018 {
2019 	return spdk_get_io_channel(&spdk_accel_module_list);
2020 }
2021 
2022 static void
2023 accel_module_initialize(void)
2024 {
2025 	struct spdk_accel_module_if *accel_module;
2026 
2027 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2028 		accel_module->module_init();
2029 	}
2030 }
2031 
2032 static void
2033 accel_module_init_opcode(enum accel_opcode opcode)
2034 {
2035 	struct accel_module *module = &g_modules_opc[opcode];
2036 	struct spdk_accel_module_if *module_if = module->module;
2037 
2038 	if (module_if->get_memory_domains != NULL) {
2039 		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
2040 	}
2041 }
2042 
2043 int
2044 spdk_accel_initialize(void)
2045 {
2046 	enum accel_opcode op;
2047 	struct spdk_accel_module_if *accel_module = NULL;
2048 	int rc;
2049 
2050 	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
2051 				       "SPDK_ACCEL_DMA_DEVICE");
2052 	if (rc != 0) {
2053 		SPDK_ERRLOG("Failed to create accel memory domain\n");
2054 		return rc;
2055 	}
2056 
2057 	spdk_spin_init(&g_keyring_spin);
2058 
2059 	g_modules_started = true;
2060 	accel_module_initialize();
2061 
2062 	/* Create our priority global map of opcodes to modules. We populate it starting
2063 	 * with the software module (guaranteed to be first on the list) and then
2064 	 * update opcodes with HW modules that have been initialized.
2065 	 * NOTE: all opcodes must be supported by software in the event that no HW
2066 	 * modules are initialized to support the operation.
2067 	 */
2068 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2069 		for (op = 0; op < ACCEL_OPC_LAST; op++) {
2070 			if (accel_module->supports_opcode(op)) {
2071 				g_modules_opc[op].module = accel_module;
2072 				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
2073 			}
2074 		}
2075 	}
2076 
2077 	/* Now let's check for overrides and apply any that exist */
2078 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
2079 		if (g_modules_opc_override[op] != NULL) {
2080 			accel_module = _module_find_by_name(g_modules_opc_override[op]);
2081 			if (accel_module == NULL) {
2082 				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
2083 				rc = -EINVAL;
2084 				goto error;
2085 			}
2086 			if (accel_module->supports_opcode(op) == false) {
2087 				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
2088 				rc = -EINVAL;
2089 				goto error;
2090 			}
2091 			g_modules_opc[op].module = accel_module;
2092 		}
2093 	}
2094 
2095 	if (g_modules_opc[ACCEL_OPC_ENCRYPT].module != g_modules_opc[ACCEL_OPC_DECRYPT].module) {
2096 		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations");
2097 		rc = -EINVAL;
2098 		goto error;
2099 	}
2100 
2101 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
2102 		assert(g_modules_opc[op].module != NULL);
2103 		accel_module_init_opcode(op);
2104 	}
2105 
2106 	rc = spdk_iobuf_register_module("accel");
2107 	if (rc != 0) {
2108 		SPDK_ERRLOG("Failed to register accel iobuf module\n");
2109 		goto error;
2110 	}
2111 
2112 	/*
2113 	 * We need a unique identifier for the accel framework, so use the
2114 	 * spdk_accel_module_list address for this purpose.
2115 	 */
2116 	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
2117 				sizeof(struct accel_io_channel), "accel");
2118 
2119 	return 0;
2120 error:
2121 	spdk_memory_domain_destroy(g_accel_domain);
2122 
2123 	return rc;
2124 }
2125 
2126 static void
2127 accel_module_finish_cb(void)
2128 {
2129 	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;
2130 
2131 	spdk_memory_domain_destroy(g_accel_domain);
2132 
2133 	cb_fn(g_fini_cb_arg);
2134 	g_fini_cb_fn = NULL;
2135 	g_fini_cb_arg = NULL;
2136 }
2137 
2138 static void
2139 accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
2140 			   const char *module_str)
2141 {
2142 	spdk_json_write_object_begin(w);
2143 	spdk_json_write_named_string(w, "method", "accel_assign_opc");
2144 	spdk_json_write_named_object_begin(w, "params");
2145 	spdk_json_write_named_string(w, "opname", opc_str);
2146 	spdk_json_write_named_string(w, "module", module_str);
2147 	spdk_json_write_object_end(w);
2148 	spdk_json_write_object_end(w);
2149 }
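
/*
 * For example, a "copy" opcode overridden to a module named "ioat" (the module
 * name here is illustrative) is written as:
 *
 *	{
 *	  "method": "accel_assign_opc",
 *	  "params": { "opname": "copy", "module": "ioat" }
 *	}
 */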
2150 
2151 static void
2152 __accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
2153 {
2154 	spdk_json_write_named_string(w, "name", key->param.key_name);
2155 	spdk_json_write_named_string(w, "cipher", key->param.cipher);
2156 	spdk_json_write_named_string(w, "key", key->param.hex_key);
2157 	if (key->param.hex_key2) {
2158 		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
2159 	}
2160 }
2161 
2162 void
2163 _accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
2164 {
2165 	spdk_json_write_object_begin(w);
2166 	__accel_crypto_key_dump_param(w, key);
2167 	spdk_json_write_object_end(w);
2168 }
2169 
2170 static void
2171 _accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
2172 				    struct spdk_accel_crypto_key *key)
2173 {
2174 	spdk_json_write_object_begin(w);
2175 	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
2176 	spdk_json_write_named_object_begin(w, "params");
2177 	__accel_crypto_key_dump_param(w, key);
2178 	spdk_json_write_object_end(w);
2179 	spdk_json_write_object_end(w);
2180 }
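
/*
 * The resulting config entry for a key looks like (values illustrative):
 *
 *	{
 *	  "method": "accel_crypto_key_create",
 *	  "params": { "name": "my_key", "cipher": "AES_XTS",
 *	              "key": "<hex>", "key2": "<hex>" }
 *	}
 */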
2181 
2182 static void
2183 _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
2184 {
2185 	struct spdk_accel_crypto_key *key;
2186 
2187 	spdk_spin_lock(&g_keyring_spin);
2188 	TAILQ_FOREACH(key, &g_keyring, link) {
2189 		if (full_dump) {
2190 			_accel_crypto_key_write_config_json(w, key);
2191 		} else {
2192 			_accel_crypto_key_dump_param(w, key);
2193 		}
2194 	}
2195 	spdk_spin_unlock(&g_keyring_spin);
2196 }
2197 
2198 void
2199 _accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
2200 {
2201 	_accel_crypto_keys_write_config_json(w, false);
2202 }
2203 
2204 void
2205 spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
2206 {
2207 	struct spdk_accel_module_if *accel_module;
2208 	int i;
2209 
2210 	/*
2211 	 * The accel framework itself has no config; the
2212 	 * modules may have some, though.
2213 	 */
2214 	spdk_json_write_array_begin(w);
2215 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2216 		if (accel_module->write_config_json) {
2217 			accel_module->write_config_json(w);
2218 		}
2219 	}
2220 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
2221 		if (g_modules_opc_override[i]) {
2222 			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
2223 		}
2224 	}
2225 
2226 	_accel_crypto_keys_write_config_json(w, true);
2227 
2228 	spdk_json_write_array_end(w);
2229 }
2230 
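/*
 * Walks the module list one module at a time. A module's module_fini()
 * callback is expected to call spdk_accel_module_finish() again once it is
 * done, which advances g_accel_module to the next module; when the list is
 * exhausted, the framework-level cleanup in accel_module_finish_cb() runs.
 */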
2231 void
2232 spdk_accel_module_finish(void)
2233 {
2234 	if (!g_accel_module) {
2235 		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
2236 	} else {
2237 		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
2238 	}
2239 
2240 	if (!g_accel_module) {
2241 		spdk_spin_destroy(&g_keyring_spin);
2242 		accel_module_finish_cb();
2243 		return;
2244 	}
2245 
2246 	if (g_accel_module->module_fini) {
2247 		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
2248 	} else {
2249 		spdk_accel_module_finish();
2250 	}
2251 }
2252 
2253 void
2254 spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
2255 {
2256 	struct spdk_accel_crypto_key *key, *key_tmp;
2257 	enum accel_opcode op;
2258 
2259 	assert(cb_fn != NULL);
2260 
2261 	g_fini_cb_fn = cb_fn;
2262 	g_fini_cb_arg = cb_arg;
2263 
2264 	spdk_spin_lock(&g_keyring_spin);
2265 	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
2266 		accel_crypto_key_destroy_unsafe(key);
2267 	}
2268 	spdk_spin_unlock(&g_keyring_spin);
2269 
2270 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
2271 		if (g_modules_opc_override[op] != NULL) {
2272 			free(g_modules_opc_override[op]);
2273 			g_modules_opc_override[op] = NULL;
2274 		}
2275 		g_modules_opc[op].module = NULL;
2276 	}
2277 
2278 	spdk_io_device_unregister(&spdk_accel_module_list, NULL);
2279 	spdk_accel_module_finish();
2280 }
2281 
2282 SPDK_LOG_REGISTER_COMPONENT(accel)
2283