xref: /spdk/lib/accel/accel.c (revision ba20950a539d0b71a20f8a1199cbf759de92e854)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2020 Intel Corporation.
3  *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
4  *   All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_internal/accel_module.h"
10 
11 #include "accel_internal.h"
12 
13 #include "spdk/dma.h"
14 #include "spdk/env.h"
15 #include "spdk/likely.h"
16 #include "spdk/log.h"
17 #include "spdk/thread.h"
18 #include "spdk/json.h"
19 #include "spdk/crc32.h"
20 #include "spdk/util.h"
21 #include "spdk/hexlify.h"
22 
23 /* Accelerator Framework: The following provides a top level
24  * generic API for the accelerator functions defined here. Modules,
25  * such as the one in /module/accel/ioat, supply the implementation
26  * with the exception of the pure software implementation contained
27  * later in this file.
28  */
29 
30 #define ALIGN_4K			0x1000
31 #define MAX_TASKS_PER_CHANNEL		0x800
32 #define ACCEL_SMALL_CACHE_SIZE		0
33 #define ACCEL_LARGE_CACHE_SIZE		0
34 /* Set MSB, so we don't return NULL pointers as buffers */
35 #define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
36 #define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)
37 
/* A registered module assigned to an opcode, plus whether it can handle
 * buffers described by memory domains (so callers don't have to re-query
 * the module each time). */
struct accel_module {
	struct spdk_accel_module_if	*module;
	bool				supports_memory_domains;
};
42 
43 /* Largest context size for all accel modules */
44 static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);
45 
46 static struct spdk_accel_module_if *g_accel_module = NULL;
47 static spdk_accel_fini_cb g_fini_cb_fn = NULL;
48 static void *g_fini_cb_arg = NULL;
49 static bool g_modules_started = false;
50 static struct spdk_memory_domain *g_accel_domain;
51 
52 /* Global list of registered accelerator modules */
53 static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
54 	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);
55 
56 /* Crypto keyring */
57 static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
58 static struct spdk_spinlock g_keyring_spin;
59 
60 /* Global array mapping capabilities to modules */
61 static struct accel_module g_modules_opc[ACCEL_OPC_LAST] = {};
62 static char *g_modules_opc_override[ACCEL_OPC_LAST] = {};
63 TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
64 static struct spdk_accel_driver *g_accel_driver;
65 
66 static const char *g_opcode_strings[ACCEL_OPC_LAST] = {
67 	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
68 	"compress", "decompress", "encrypt", "decrypt", "xor"
69 };
70 
/* States of the per-sequence state machine.  The names suggest the flow:
 * resolve virtual accel buffers, set up bounce buffers, pull data from a
 * source memory domain, execute the task, push data back out, then advance
 * to the next task.  NOTE(review): the driver of these transitions is not
 * visible in this chunk — confirm against the sequence-processing code. */
enum accel_sequence_state {
	ACCEL_SEQUENCE_STATE_INIT,
	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_PULL_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
	ACCEL_SEQUENCE_STATE_EXEC_TASK,
	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
	ACCEL_SEQUENCE_STATE_NEXT_TASK,
	ACCEL_SEQUENCE_STATE_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_ERROR,
	ACCEL_SEQUENCE_STATE_MAX,
};

/* Human-readable state names used only by debug logging; marked unused so
 * builds without SPDK_DEBUGLOG don't warn. */
static const char *g_seq_states[]
__attribute__((unused)) = {
	[ACCEL_SEQUENCE_STATE_INIT] = "init",
	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
	[ACCEL_SEQUENCE_STATE_MAX] = "",
};

/* Bounds-checked lookup into g_seq_states for logging. */
#define ACCEL_SEQUENCE_STATE_STRING(s) \
	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
	 ? g_seq_states[s] : "unknown")
111 
/* Descriptor for an accel-framework buffer.  The backing memory (buf) is
 * NULL until allocated from the iobuf pool (see accel_get_buf()/
 * accel_put_buf()); until then only the requested length is recorded. */
struct accel_buffer {
	struct spdk_accel_sequence	*seq;	/* owning sequence, if any */
	void				*buf;	/* backing memory, NULL until allocated */
	uint64_t			len;	/* requested size in bytes */
	struct spdk_iobuf_entry		iobuf;	/* presumably used to wait for iobuf memory — confirm */
	spdk_accel_sequence_get_buf_cb	cb_fn;	/* invoked once memory is available */
	void				*cb_ctx;
	TAILQ_ENTRY(accel_buffer)	link;	/* buf_pool / bounce_bufs linkage */
};
121 
/* Per-thread accel channel: one module channel per opcode plus preallocated
 * pools of tasks, sequences and buffer descriptors (the *_base pointers hold
 * the backing allocations, the TAILQs hold the free elements). */
struct accel_io_channel {
	struct spdk_io_channel			*module_ch[ACCEL_OPC_LAST];
	void					*task_pool_base;
	struct spdk_accel_sequence		*seq_pool_base;
	struct accel_buffer			*buf_pool_base;
	TAILQ_HEAD(, spdk_accel_task)		task_pool;
	TAILQ_HEAD(, spdk_accel_sequence)	seq_pool;
	TAILQ_HEAD(, accel_buffer)		buf_pool;
	struct spdk_iobuf_channel		iobuf;
};
132 
TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);

/* A chain of accel tasks executed in order.  Pending tasks live on `tasks`,
 * finished ones move to `completed`; bounce_bufs holds buffers borrowed from
 * the channel for memory-domain transfers. */
struct spdk_accel_sequence {
	struct accel_io_channel			*ch;	/* channel the sequence was allocated from */
	struct accel_sequence_tasks		tasks;
	struct accel_sequence_tasks		completed;
	TAILQ_HEAD(, accel_buffer)		bounce_bufs;
	enum accel_sequence_state		state;
	int					status;	/* first error observed, 0 on success */
	bool					in_process_sequence;	/* guards against re-entering processing */
	spdk_accel_completion_cb		cb_fn;	/* user completion for the whole sequence */
	void					*cb_arg;
	TAILQ_ENTRY(spdk_accel_sequence)	link;	/* seq_pool linkage */
};
147 
/* Transition \a seq to \a state, logging the transition.  Once a sequence
 * has entered the ERROR state it must stay there — the assert enforces
 * that ERROR is terminal. */
static inline void
accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
{
	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
	/* An errored sequence may only transition to ERROR again */
	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
	seq->state = state;
}
156 
157 static void
158 accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
159 {
160 	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
161 	assert(status != 0);
162 	seq->status = status;
163 }
164 
165 int
166 spdk_accel_get_opc_module_name(enum accel_opcode opcode, const char **module_name)
167 {
168 	if (opcode >= ACCEL_OPC_LAST) {
169 		/* invalid opcode */
170 		return -EINVAL;
171 	}
172 
173 	if (g_modules_opc[opcode].module) {
174 		*module_name = g_modules_opc[opcode].module->name;
175 	} else {
176 		return -ENOENT;
177 	}
178 
179 	return 0;
180 }
181 
/* Invoke \a fn once per registered module, filling \a info with the module's
 * name and the opcodes it claims to support.  \a info is reused across
 * iterations (num_ops is reset each time).
 * NOTE(review): assumes info->ops has room for ACCEL_OPC_LAST entries —
 * confirm against the definition of struct module_info. */
void
_accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
{
	struct spdk_accel_module_if *accel_module;
	enum accel_opcode opcode;
	int j = 0;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (opcode = 0; opcode < ACCEL_OPC_LAST; opcode++) {
			if (accel_module->supports_opcode(opcode)) {
				info->ops[j] = opcode;
				j++;
			}
		}
		info->name = accel_module->name;
		info->num_ops = j;
		fn(info);
		/* restart opcode accumulation for the next module */
		j = 0;
	}
}
202 
203 int
204 _accel_get_opc_name(enum accel_opcode opcode, const char **opcode_name)
205 {
206 	int rc = 0;
207 
208 	if (opcode < ACCEL_OPC_LAST) {
209 		*opcode_name = g_opcode_strings[opcode];
210 	} else {
211 		/* invalid opcode */
212 		rc = -EINVAL;
213 	}
214 
215 	return rc;
216 }
217 
218 int
219 spdk_accel_assign_opc(enum accel_opcode opcode, const char *name)
220 {
221 	if (g_modules_started == true) {
222 		/* we don't allow re-assignment once things have started */
223 		return -EINVAL;
224 	}
225 
226 	if (opcode >= ACCEL_OPC_LAST) {
227 		/* invalid opcode */
228 		return -EINVAL;
229 	}
230 
231 	/* module selection will be validated after the framework starts. */
232 	g_modules_opc_override[opcode] = strdup(name);
233 
234 	return 0;
235 }
236 
/* Complete \a accel_task with \a status: return it to its channel's pool
 * and invoke the user's completion callback. */
void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
	struct accel_io_channel *accel_ch = accel_task->accel_ch;
	spdk_accel_completion_cb	cb_fn = accel_task->cb_fn;
	void				*cb_arg = accel_task->cb_arg;

	/* We should put the accel_task into the list firstly in order to avoid
	 * the accel task list is exhausted when there is recursive call to
	 * allocate accel_task in user's call back function (cb_fn)
	 */
	TAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link);

	/* cb_fn/cb_arg were captured above because the task may be reused
	 * (re-allocated from the pool) inside the callback */
	cb_fn(cb_arg, status);
}
252 
253 inline static struct spdk_accel_task *
254 _get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
255 {
256 	struct spdk_accel_task *accel_task;
257 
258 	accel_task = TAILQ_FIRST(&accel_ch->task_pool);
259 	if (accel_task == NULL) {
260 		return NULL;
261 	}
262 
263 	TAILQ_REMOVE(&accel_ch->task_pool, accel_task, link);
264 	accel_task->link.tqe_next = NULL;
265 	accel_task->link.tqe_prev = NULL;
266 
267 	accel_task->cb_fn = cb_fn;
268 	accel_task->cb_arg = cb_arg;
269 	accel_task->accel_ch = accel_ch;
270 	accel_task->bounce.s.orig_iovs = NULL;
271 	accel_task->bounce.d.orig_iovs = NULL;
272 
273 	return accel_task;
274 }
275 
/* Accel framework public API for copy function: copy nbytes from src to dst
 * and invoke cb_fn(cb_arg, status) on completion.  Returns -ENOMEM when the
 * channel's task pool is exhausted; otherwise the module's return value. */
int
spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
		       uint64_t nbytes, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COPY].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COPY];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	/* Describe src/dst as single-entry iovecs stored inside the task */
	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->op_code = ACCEL_OPC_COPY;
	accel_task->flags = flags;
	/* Plain host buffers: no memory domains, not part of a sequence */
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}
307 
/* Accel framework public API for dual cast copy function: copy src to both
 * dst1 and dst2.  Both destinations must be 4K-aligned.  Returns -EINVAL on
 * misalignment, -ENOMEM when the task pool is empty. */
int
spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
			   void *dst2, void *src, uint64_t nbytes, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_DUALCAST].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_DUALCAST];

	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	/* One single-entry iovec per buffer, stored inside the task */
	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST2];
	accel_task->d.iovs[0].iov_base = dst1;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->d2.iovs[0].iov_base = dst2;
	accel_task->d2.iovs[0].iov_len = nbytes;
	accel_task->d2.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_DUALCAST;
	/* Plain host buffers: no memory domains, not part of a sequence */
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}
349 
/* Accel framework public API for compare function: compare nbytes of src1
 * against src2; the result is reported via cb_fn's status argument. */
int
spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			  void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COMPARE].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COMPARE];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	/* Both sources as single-entry iovecs stored inside the task */
	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC2];
	accel_task->s.iovs[0].iov_base = src1;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->s2.iovs[0].iov_base = src2;
	accel_task->s2.iovs[0].iov_len = nbytes;
	accel_task->s2.iovcnt = 1;
	accel_task->op_code = ACCEL_OPC_COMPARE;
	/* Plain host buffers: no memory domains, not part of a sequence */
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}
381 
/* Accel framework public API for fill function: fill nbytes at dst with the
 * byte value \a fill (replicated into a 64-bit pattern for the module). */
int
spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
		       uint8_t fill, uint64_t nbytes, int flags,
		       spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_FILL].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_FILL];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	/* Replicate the fill byte across all 8 bytes of fill_pattern */
	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_FILL;
	/* Plain host buffer: no memory domains, not part of a sequence */
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}
411 
/* Accel framework public API for CRC-32C function: compute the CRC-32C of
 * nbytes at src, seeded with \a seed, storing the result in *crc_dst when
 * the operation completes. */
int
spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			 void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_CRC32C].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_CRC32C];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = ACCEL_OPC_CRC32C;
	/* Plain host buffer: no memory domains, not part of a sequence */
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}
441 
442 /* Accel framework public API for chained CRC-32C function */
443 int
444 spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
445 			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
446 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
447 {
448 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
449 	struct spdk_accel_task *accel_task;
450 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_CRC32C].module;
451 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_CRC32C];
452 
453 	if (iov == NULL) {
454 		SPDK_ERRLOG("iov should not be NULL");
455 		return -EINVAL;
456 	}
457 
458 	if (!iov_cnt) {
459 		SPDK_ERRLOG("iovcnt should not be zero value\n");
460 		return -EINVAL;
461 	}
462 
463 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
464 	if (accel_task == NULL) {
465 		SPDK_ERRLOG("no memory\n");
466 		assert(0);
467 		return -ENOMEM;
468 	}
469 
470 	accel_task->s.iovs = iov;
471 	accel_task->s.iovcnt = iov_cnt;
472 	accel_task->crc_dst = crc_dst;
473 	accel_task->seed = seed;
474 	accel_task->op_code = ACCEL_OPC_CRC32C;
475 	accel_task->src_domain = NULL;
476 	accel_task->dst_domain = NULL;
477 	accel_task->step_cb_fn = NULL;
478 
479 	return module->submit_tasks(module_ch, accel_task);
480 }
481 
/* Accel framework public API for copy with CRC-32C function: copy nbytes
 * from src to dst while also computing the CRC-32C of the data into
 * *crc_dst. */
int
spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
			      int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COPY_CRC32C].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COPY_CRC32C];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	/* Single-entry iovecs stored inside the task */
	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;
	/* Plain host buffers: no memory domains, not part of a sequence */
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}
516 
517 /* Accel framework public API for chained copy + CRC-32C function */
518 int
519 spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
520 			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
521 			       uint32_t seed, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
522 {
523 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
524 	struct spdk_accel_task *accel_task;
525 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COPY_CRC32C].module;
526 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COPY_CRC32C];
527 	uint64_t nbytes;
528 	uint32_t i;
529 
530 	if (src_iovs == NULL) {
531 		SPDK_ERRLOG("iov should not be NULL");
532 		return -EINVAL;
533 	}
534 
535 	if (!iov_cnt) {
536 		SPDK_ERRLOG("iovcnt should not be zero value\n");
537 		return -EINVAL;
538 	}
539 
540 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
541 	if (accel_task == NULL) {
542 		SPDK_ERRLOG("no memory\n");
543 		assert(0);
544 		return -ENOMEM;
545 	}
546 
547 	nbytes = 0;
548 	for (i = 0; i < iov_cnt; i++) {
549 		nbytes += src_iovs[i].iov_len;
550 	}
551 
552 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
553 	accel_task->d.iovs[0].iov_base = dst;
554 	accel_task->d.iovs[0].iov_len = nbytes;
555 	accel_task->d.iovcnt = 1;
556 	accel_task->s.iovs = src_iovs;
557 	accel_task->s.iovcnt = iov_cnt;
558 	accel_task->crc_dst = crc_dst;
559 	accel_task->seed = seed;
560 	accel_task->flags = flags;
561 	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;
562 	accel_task->src_domain = NULL;
563 	accel_task->dst_domain = NULL;
564 	accel_task->step_cb_fn = NULL;
565 
566 	return module->submit_tasks(module_ch, accel_task);
567 }
568 
/* Submit a compression: compress the src iovecs into dst (nbytes is the
 * capacity of dst).  The compressed size is written to *output_size on
 * completion if non-NULL. */
int
spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COMPRESS].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COMPRESS];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_COMPRESS;
	/* Plain host buffers: no memory domains, not part of a sequence */
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}
599 
/* Submit a decompression: inflate the src iovecs into the dst iovecs.  The
 * decompressed size is written to *output_size on completion if non-NULL. */
int
spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
			     uint32_t *output_size, int flags, spdk_accel_completion_cb cb_fn,
			     void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_DECOMPRESS].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_DECOMPRESS];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_DECOMPRESS;
	/* Plain host buffers: no memory domains, not part of a sequence */
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}
629 
/* Submit an encryption of the src iovecs into the dst iovecs using \a key.
 * \a iv seeds the first block and \a block_size is the crypto block
 * granularity — exact semantics are module-defined (NOTE(review): confirm
 * against the crypto module documentation). */
int
spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_ENCRYPT].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_ENCRYPT];

	/* All of key, buffers and block size are mandatory */
	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_ENCRYPT;
	/* Plain host buffers: no memory domains, not part of a sequence */
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}
666 
/* Submit a decryption of the src iovecs into the dst iovecs using \a key.
 * Mirrors spdk_accel_submit_encrypt(); \a iv and \a block_size semantics
 * are module-defined (NOTE(review): confirm against the crypto module). */
int
spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_DECRYPT].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_DECRYPT];

	/* All of key, buffers and block size are mandatory */
	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_DECRYPT;
	/* Plain host buffers: no memory domains, not part of a sequence */
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}
703 
/* Submit an XOR of \a nsrcs source buffers (each nbytes long) into dst. */
int
spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_XOR].module;
	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_XOR];

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	/* XOR uses a raw pointer array for sources rather than iovecs */
	accel_task->nsrcs.srcs = sources;
	accel_task->nsrcs.cnt = nsrcs;
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->op_code = ACCEL_OPC_XOR;
	/* Plain host buffers: no memory domains, not part of a sequence */
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return module->submit_tasks(module_ch, accel_task);
}
731 
732 static inline struct accel_buffer *
733 accel_get_buf(struct accel_io_channel *ch, uint64_t len)
734 {
735 	struct accel_buffer *buf;
736 
737 	buf = TAILQ_FIRST(&ch->buf_pool);
738 	if (spdk_unlikely(buf == NULL)) {
739 		return NULL;
740 	}
741 
742 	TAILQ_REMOVE(&ch->buf_pool, buf, link);
743 	buf->len = len;
744 	buf->buf = NULL;
745 	buf->seq = NULL;
746 	buf->cb_fn = NULL;
747 
748 	return buf;
749 }
750 
/* Return \a buf to the channel's descriptor pool, releasing its backing
 * iobuf memory first if any was allocated. */
static inline void
accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
{
	if (buf->buf != NULL) {
		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
	}

	TAILQ_INSERT_HEAD(&ch->buf_pool, buf, link);
}
760 
/* Take a sequence from the channel's pool and reset it to a clean initial
 * state.  Returns NULL when the pool is exhausted. */
static inline struct spdk_accel_sequence *
accel_sequence_get(struct accel_io_channel *ch)
{
	struct spdk_accel_sequence *seq;

	seq = TAILQ_FIRST(&ch->seq_pool);
	if (seq == NULL) {
		return NULL;
	}

	TAILQ_REMOVE(&ch->seq_pool, seq, link);

	/* Start with empty task/buffer lists */
	TAILQ_INIT(&seq->tasks);
	TAILQ_INIT(&seq->completed);
	TAILQ_INIT(&seq->bounce_bufs);

	seq->ch = ch;
	seq->status = 0;
	seq->state = ACCEL_SEQUENCE_STATE_INIT;
	seq->in_process_sequence = false;

	return seq;
}
784 
785 static inline void
786 accel_sequence_put(struct spdk_accel_sequence *seq)
787 {
788 	struct accel_io_channel *ch = seq->ch;
789 	struct accel_buffer *buf;
790 
791 	while (!TAILQ_EMPTY(&seq->bounce_bufs)) {
792 		buf = TAILQ_FIRST(&seq->bounce_bufs);
793 		TAILQ_REMOVE(&seq->bounce_bufs, buf, link);
794 		accel_put_buf(seq->ch, buf);
795 	}
796 
797 	assert(TAILQ_EMPTY(&seq->tasks));
798 	assert(TAILQ_EMPTY(&seq->completed));
799 	seq->ch = NULL;
800 
801 	TAILQ_INSERT_HEAD(&ch->seq_pool, seq, link);
802 }
803 
804 static void accel_sequence_task_cb(void *cb_arg, int status);
805 
806 static inline struct spdk_accel_task *
807 accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
808 			spdk_accel_step_cb cb_fn, void *cb_arg)
809 {
810 	struct spdk_accel_task *task;
811 
812 	task = _get_task(ch, accel_sequence_task_cb, seq);
813 	if (task == NULL) {
814 		return task;
815 	}
816 
817 	task->step_cb_fn = cb_fn;
818 	task->step_cb_arg = cb_arg;
819 
820 	return task;
821 }
822 
/* Append a copy operation to a sequence.  If *pseq is NULL a new sequence
 * is allocated from the channel and returned through *pseq.  cb_fn is the
 * per-step callback for this operation.  On -ENOMEM a sequence that was
 * allocated here is returned to the pool; a caller-provided one is left
 * intact. */
int
spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
		       struct iovec *src_iovs, uint32_t src_iovcnt,
		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		/* Only release the sequence if we allocated it above */
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->flags = flags;
	task->op_code = ACCEL_OPC_COPY;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}
868 
/* Append a fill operation to a sequence (allocating one if *pseq is NULL):
 * fill \a len bytes of \a buf with \a pattern.  On -ENOMEM a sequence
 * allocated here is returned to the pool; a caller-provided one is left
 * intact. */
int
spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       void *buf, uint64_t len,
		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		/* Only release the sequence if we allocated it above */
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	/* Replicate the fill byte across all 8 bytes of fill_pattern */
	memset(&task->fill_pattern, pattern, sizeof(uint64_t));

	task->d.iovs = &task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	task->d.iovs[0].iov_base = buf;
	task->d.iovs[0].iov_len = len;
	task->d.iovcnt = 1;
	task->src_domain = NULL;
	task->dst_domain = domain;
	task->dst_domain_ctx = domain_ctx;
	task->flags = flags;
	task->op_code = ACCEL_OPC_FILL;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}
913 
/* Append a decompress operation to a sequence (allocating one if *pseq is
 * NULL).  On -ENOMEM a sequence allocated here is returned to the pool; a
 * caller-provided one is left intact. */
int
spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			     struct iovec *dst_iovs, size_t dst_iovcnt,
			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *src_iovs, size_t src_iovcnt,
			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		/* Only release the sequence if we allocated it above */
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	/* TODO: support output_size for chaining */
	task->output_size = NULL;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->flags = flags;
	task->op_code = ACCEL_OPC_DECOMPRESS;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}
961 
962 int
963 spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
964 			  struct spdk_accel_crypto_key *key,
965 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
966 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
967 			  struct iovec *src_iovs, uint32_t src_iovcnt,
968 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
969 			  uint64_t iv, uint32_t block_size, int flags,
970 			  spdk_accel_step_cb cb_fn, void *cb_arg)
971 {
972 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
973 	struct spdk_accel_task *task;
974 	struct spdk_accel_sequence *seq = *pseq;
975 
976 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key ||
977 			  !block_size)) {
978 		return -EINVAL;
979 	}
980 
981 	if (seq == NULL) {
982 		seq = accel_sequence_get(accel_ch);
983 		if (spdk_unlikely(seq == NULL)) {
984 			return -ENOMEM;
985 		}
986 	}
987 
988 	assert(seq->ch == accel_ch);
989 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
990 	if (spdk_unlikely(task == NULL)) {
991 		if (*pseq == NULL) {
992 			accel_sequence_put(seq);
993 		}
994 
995 		return -ENOMEM;
996 	}
997 
998 	task->crypto_key = key;
999 	task->src_domain = src_domain;
1000 	task->src_domain_ctx = src_domain_ctx;
1001 	task->s.iovs = src_iovs;
1002 	task->s.iovcnt = src_iovcnt;
1003 	task->dst_domain = dst_domain;
1004 	task->dst_domain_ctx = dst_domain_ctx;
1005 	task->d.iovs = dst_iovs;
1006 	task->d.iovcnt = dst_iovcnt;
1007 	task->iv = iv;
1008 	task->block_size = block_size;
1009 	task->flags = flags;
1010 	task->op_code = ACCEL_OPC_ENCRYPT;
1011 
1012 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1013 	*pseq = seq;
1014 
1015 	return 0;
1016 }
1017 
1018 int
1019 spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1020 			  struct spdk_accel_crypto_key *key,
1021 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
1022 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1023 			  struct iovec *src_iovs, uint32_t src_iovcnt,
1024 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1025 			  uint64_t iv, uint32_t block_size, int flags,
1026 			  spdk_accel_step_cb cb_fn, void *cb_arg)
1027 {
1028 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1029 	struct spdk_accel_task *task;
1030 	struct spdk_accel_sequence *seq = *pseq;
1031 
1032 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key ||
1033 			  !block_size)) {
1034 		return -EINVAL;
1035 	}
1036 
1037 	if (seq == NULL) {
1038 		seq = accel_sequence_get(accel_ch);
1039 		if (spdk_unlikely(seq == NULL)) {
1040 			return -ENOMEM;
1041 		}
1042 	}
1043 
1044 	assert(seq->ch == accel_ch);
1045 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1046 	if (spdk_unlikely(task == NULL)) {
1047 		if (*pseq == NULL) {
1048 			accel_sequence_put(seq);
1049 		}
1050 
1051 		return -ENOMEM;
1052 	}
1053 
1054 	task->crypto_key = key;
1055 	task->src_domain = src_domain;
1056 	task->src_domain_ctx = src_domain_ctx;
1057 	task->s.iovs = src_iovs;
1058 	task->s.iovcnt = src_iovcnt;
1059 	task->dst_domain = dst_domain;
1060 	task->dst_domain_ctx = dst_domain_ctx;
1061 	task->d.iovs = dst_iovs;
1062 	task->d.iovcnt = dst_iovcnt;
1063 	task->iv = iv;
1064 	task->block_size = block_size;
1065 	task->flags = flags;
1066 	task->op_code = ACCEL_OPC_DECRYPT;
1067 
1068 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1069 	*pseq = seq;
1070 
1071 	return 0;
1072 }
1073 
1074 int
1075 spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
1076 		   struct spdk_memory_domain **domain, void **domain_ctx)
1077 {
1078 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1079 	struct accel_buffer *accel_buf;
1080 
1081 	accel_buf = accel_get_buf(accel_ch, len);
1082 	if (spdk_unlikely(accel_buf == NULL)) {
1083 		return -ENOMEM;
1084 	}
1085 
1086 	/* We always return the same pointer and identify the buffers through domain_ctx */
1087 	*buf = ACCEL_BUFFER_BASE;
1088 	*domain_ctx = accel_buf;
1089 	*domain = g_accel_domain;
1090 
1091 	return 0;
1092 }
1093 
1094 void
1095 spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
1096 		   struct spdk_memory_domain *domain, void *domain_ctx)
1097 {
1098 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1099 	struct accel_buffer *accel_buf = domain_ctx;
1100 
1101 	assert(domain == g_accel_domain);
1102 	assert(buf == ACCEL_BUFFER_BASE);
1103 
1104 	accel_put_buf(accel_ch, accel_buf);
1105 }
1106 
1107 static void
1108 accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
1109 {
1110 	struct spdk_accel_task *task;
1111 	struct accel_io_channel *ch = seq->ch;
1112 	spdk_accel_step_cb cb_fn;
1113 	void *cb_arg;
1114 
1115 	while (!TAILQ_EMPTY(&seq->completed)) {
1116 		task = TAILQ_FIRST(&seq->completed);
1117 		TAILQ_REMOVE(&seq->completed, task, seq_link);
1118 		cb_fn = task->step_cb_fn;
1119 		cb_arg = task->step_cb_arg;
1120 		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
1121 		if (cb_fn != NULL) {
1122 			cb_fn(cb_arg);
1123 		}
1124 	}
1125 
1126 	while (!TAILQ_EMPTY(&seq->tasks)) {
1127 		task = TAILQ_FIRST(&seq->tasks);
1128 		TAILQ_REMOVE(&seq->tasks, task, seq_link);
1129 		cb_fn = task->step_cb_fn;
1130 		cb_arg = task->step_cb_arg;
1131 		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
1132 		if (cb_fn != NULL) {
1133 			cb_fn(cb_arg);
1134 		}
1135 	}
1136 }
1137 
1138 static void
1139 accel_sequence_complete(struct spdk_accel_sequence *seq)
1140 {
1141 	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, seq->status);
1142 
1143 	/* First notify all users that appended operations to this sequence */
1144 	accel_sequence_complete_tasks(seq);
1145 
1146 	/* Then notify the user that finished the sequence */
1147 	seq->cb_fn(seq->cb_arg, seq->status);
1148 
1149 	accel_sequence_put(seq);
1150 }
1151 
1152 static void
1153 accel_update_buf(void **buf, struct accel_buffer *accel_buf)
1154 {
1155 	uintptr_t offset;
1156 
1157 	offset = (uintptr_t)(*buf) & ACCEL_BUFFER_OFFSET_MASK;
1158 	assert(offset < accel_buf->len);
1159 
1160 	*buf = (char *)accel_buf->buf + offset;
1161 }
1162 
/* Rewrites every placeholder base address in the iovec array to point into the real
 * buffer backing 'buf'. */
static void
accel_update_iovs(struct iovec *iovs, uint32_t iovcnt, struct accel_buffer *buf)
{
	struct iovec *iov;

	for (iov = iovs; iov < iovs + iovcnt; iov++) {
		accel_update_buf(&iov->iov_base, buf);
	}
}
1172 
1173 static void
1174 accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
1175 {
1176 	struct spdk_accel_task *task;
1177 
1178 	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
1179 	 * in a sequence that were using it.
1180 	 */
1181 	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
1182 		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
1183 			accel_update_iovs(task->s.iovs, task->s.iovcnt, buf);
1184 			task->src_domain = NULL;
1185 		}
1186 		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
1187 			accel_update_iovs(task->d.iovs, task->d.iovcnt, buf);
1188 			task->dst_domain = NULL;
1189 		}
1190 	}
1191 }
1192 
1193 static void accel_process_sequence(struct spdk_accel_sequence *seq);
1194 
1195 static void
1196 accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
1197 {
1198 	struct accel_buffer *accel_buf;
1199 
1200 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1201 
1202 	assert(accel_buf->seq != NULL);
1203 	assert(accel_buf->buf == NULL);
1204 	accel_buf->buf = buf;
1205 
1206 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1207 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
1208 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1209 	accel_process_sequence(accel_buf->seq);
1210 }
1211 
1212 static bool
1213 accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
1214 			 spdk_iobuf_get_cb cb_fn)
1215 {
1216 	struct accel_io_channel *ch = seq->ch;
1217 
1218 	assert(buf->buf == NULL);
1219 	assert(buf->seq == NULL);
1220 
1221 	buf->seq = seq;
1222 	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
1223 	if (buf->buf == NULL) {
1224 		return false;
1225 	}
1226 
1227 	return true;
1228 }
1229 
1230 static bool
1231 accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1232 {
1233 	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
1234 	 * NULL */
1235 	if (task->src_domain == g_accel_domain) {
1236 		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
1237 					      accel_iobuf_get_virtbuf_cb)) {
1238 			return false;
1239 		}
1240 
1241 		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
1242 	}
1243 
1244 	if (task->dst_domain == g_accel_domain) {
1245 		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
1246 					      accel_iobuf_get_virtbuf_cb)) {
1247 			return false;
1248 		}
1249 
1250 		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
1251 	}
1252 
1253 	return true;
1254 }
1255 
1256 static void
1257 accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
1258 {
1259 	struct accel_buffer *accel_buf;
1260 
1261 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1262 
1263 	assert(accel_buf->seq != NULL);
1264 	assert(accel_buf->buf == NULL);
1265 	accel_buf->buf = buf;
1266 
1267 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1268 	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
1269 }
1270 
1271 bool
1272 spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
1273 			      struct spdk_memory_domain *domain, void *domain_ctx,
1274 			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
1275 {
1276 	struct accel_buffer *accel_buf = domain_ctx;
1277 
1278 	assert(domain == g_accel_domain);
1279 	accel_buf->cb_fn = cb_fn;
1280 	accel_buf->cb_ctx = cb_ctx;
1281 
1282 	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
1283 		return false;
1284 	}
1285 
1286 	accel_sequence_set_virtbuf(seq, accel_buf);
1287 
1288 	return true;
1289 }
1290 
1291 struct spdk_accel_task *
1292 spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
1293 {
1294 	return TAILQ_FIRST(&seq->tasks);
1295 }
1296 
1297 struct spdk_accel_task *
1298 spdk_accel_sequence_next_task(struct spdk_accel_task *task)
1299 {
1300 	return TAILQ_NEXT(task, seq_link);
1301 }
1302 
/* Returns the total number of bytes described by an iovec array. */
static inline uint64_t
accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
{
	struct iovec *iov = iovs;
	struct iovec *end = iovs + iovcnt;
	uint64_t total = 0;

	for (; iov < end; iov++) {
		total += iov->iov_len;
	}

	return total;
}
1315 
1316 static inline void
1317 accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
1318 			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
1319 			struct accel_buffer *buf)
1320 {
1321 	bounce->orig_iovs = *iovs;
1322 	bounce->orig_iovcnt = *iovcnt;
1323 	bounce->orig_domain = *domain;
1324 	bounce->orig_domain_ctx = *domain_ctx;
1325 	bounce->iov.iov_base = buf->buf;
1326 	bounce->iov.iov_len = buf->len;
1327 
1328 	*iovs = &bounce->iov;
1329 	*iovcnt = 1;
1330 	*domain = NULL;
1331 }
1332 
1333 static void
1334 accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1335 {
1336 	struct spdk_accel_task *task;
1337 	struct accel_buffer *accel_buf;
1338 
1339 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1340 	assert(accel_buf->buf == NULL);
1341 	accel_buf->buf = buf;
1342 
1343 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
1344 	assert(task != NULL);
1345 
1346 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1347 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1348 	accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
1349 				&task->src_domain_ctx, accel_buf);
1350 	accel_process_sequence(accel_buf->seq);
1351 }
1352 
1353 static void
1354 accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1355 {
1356 	struct spdk_accel_task *task;
1357 	struct accel_buffer *accel_buf;
1358 
1359 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1360 	assert(accel_buf->buf == NULL);
1361 	accel_buf->buf = buf;
1362 
1363 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
1364 	assert(task != NULL);
1365 
1366 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1367 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1368 	accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
1369 				&task->dst_domain_ctx, accel_buf);
1370 	accel_process_sequence(accel_buf->seq);
1371 }
1372 
/* Allocates local bounce buffers for a task whose src/dst buffers live in a foreign
 * memory domain that the selected module cannot access directly.
 *
 * Returns 0 on success, -EAGAIN when a buffer allocation was deferred (the sequence is
 * resumed later from an iobuf callback), or -ENOMEM when a buffer descriptor couldn't
 * be allocated.
 */
static int
accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_buffer *buf;

	if (task->src_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->src_domain != g_accel_domain);

		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
		if (buf == NULL) {
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		/* Track the buffer on the sequence so it's released on completion, even if
		 * the allocation below gets deferred */
		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt,
					&task->src_domain, &task->src_domain_ctx, buf);
	}

	if (task->dst_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->dst_domain != g_accel_domain);

		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
		if (buf == NULL) {
			/* The src buffer will be released when a sequence is completed */
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt,
					&task->dst_domain, &task->dst_domain_ctx, buf);
	}

	return 0;
}
1419 
1420 static void
1421 accel_task_pull_data_cb(void *ctx, int status)
1422 {
1423 	struct spdk_accel_sequence *seq = ctx;
1424 
1425 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1426 	if (spdk_likely(status == 0)) {
1427 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1428 	} else {
1429 		accel_sequence_set_fail(seq, status);
1430 	}
1431 
1432 	accel_process_sequence(seq);
1433 }
1434 
1435 static void
1436 accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1437 {
1438 	int rc;
1439 
1440 	assert(task->bounce.s.orig_iovs != NULL);
1441 	assert(task->bounce.s.orig_domain != NULL);
1442 	assert(task->bounce.s.orig_domain != g_accel_domain);
1443 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
1444 
1445 	rc = spdk_memory_domain_pull_data(task->bounce.s.orig_domain,
1446 					  task->bounce.s.orig_domain_ctx,
1447 					  task->bounce.s.orig_iovs, task->bounce.s.orig_iovcnt,
1448 					  task->s.iovs, task->s.iovcnt,
1449 					  accel_task_pull_data_cb, seq);
1450 	if (spdk_unlikely(rc != 0)) {
1451 		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
1452 			    spdk_memory_domain_get_dma_device_id(task->bounce.s.orig_domain), rc);
1453 		accel_sequence_set_fail(seq, rc);
1454 	}
1455 }
1456 
1457 static void
1458 accel_task_push_data_cb(void *ctx, int status)
1459 {
1460 	struct spdk_accel_sequence *seq = ctx;
1461 
1462 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
1463 	if (spdk_likely(status == 0)) {
1464 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
1465 	} else {
1466 		accel_sequence_set_fail(seq, status);
1467 	}
1468 
1469 	accel_process_sequence(seq);
1470 }
1471 
1472 static void
1473 accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1474 {
1475 	int rc;
1476 
1477 	assert(task->bounce.d.orig_iovs != NULL);
1478 	assert(task->bounce.d.orig_domain != NULL);
1479 	assert(task->bounce.d.orig_domain != g_accel_domain);
1480 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
1481 
1482 	rc = spdk_memory_domain_push_data(task->bounce.d.orig_domain,
1483 					  task->bounce.d.orig_domain_ctx,
1484 					  task->bounce.d.orig_iovs, task->bounce.d.orig_iovcnt,
1485 					  task->d.iovs, task->d.iovcnt,
1486 					  accel_task_push_data_cb, seq);
1487 	if (spdk_unlikely(rc != 0)) {
1488 		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
1489 			    spdk_memory_domain_get_dma_device_id(task->bounce.s.orig_domain), rc);
1490 		accel_sequence_set_fail(seq, rc);
1491 	}
1492 }
1493 
/* Drives the sequence state machine until the sequence either completes or has to wait
 * for an asynchronous event (buffer allocation, data pull/push, or task execution by a
 * module).  It's re-entered from the various completion callbacks; in_process_sequence
 * guards against recursion when such a callback fires synchronously from within this
 * function. */
static void
accel_process_sequence(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *accel_ch = seq->ch;
	struct spdk_accel_module_if *module;
	struct spdk_io_channel *module_ch;
	struct spdk_accel_task *task;
	enum accel_sequence_state state;
	int rc;

	/* Prevent recursive calls to this function */
	if (spdk_unlikely(seq->in_process_sequence)) {
		return;
	}
	seq->in_process_sequence = true;

	task = TAILQ_FIRST(&seq->tasks);
	assert(task != NULL);

	/* Keep iterating until a state handler leaves the state unchanged, meaning we're
	 * now waiting for an asynchronous event to make further progress */
	do {
		state = seq->state;
		switch (state) {
		case ACCEL_SEQUENCE_STATE_INIT:
		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
			if (!accel_sequence_check_virtbuf(seq, task)) {
				/* We couldn't allocate a buffer, wait until one is available */
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
			/* If a module supports memory domains, we don't need to allocate bounce
			 * buffers */
			if (g_modules_opc[task->op_code].supports_memory_domains) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
			rc = accel_sequence_check_bouncebuf(seq, task);
			if (rc != 0) {
				/* We couldn't allocate a buffer, wait until one is available */
				if (rc == -EAGAIN) {
					break;
				}
				accel_sequence_set_fail(seq, rc);
				break;
			}
			if (task->bounce.s.orig_iovs != NULL) {
				/* The source is in a foreign domain: pull it into the bounce
				 * buffer before executing the task */
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
				      g_opcode_strings[task->op_code], seq);

			module = g_modules_opc[task->op_code].module;
			module_ch = accel_ch->module_ch[task->op_code];

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
			rc = module->submit_tasks(module_ch, task);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
					    g_opcode_strings[task->op_code], seq);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_PULL_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
			accel_task_pull_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
			if (task->bounce.d.orig_iovs != NULL) {
				/* The results need to be pushed back to the caller's domain */
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
			break;
		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
			accel_task_push_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
			TAILQ_REMOVE(&seq->tasks, task, seq_link);
			TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
			/* Check if there are any remaining tasks */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
			break;
		case ACCEL_SEQUENCE_STATE_ERROR:
			/* Immediately return here to make sure we don't touch the sequence
			 * after it's completed */
			assert(seq->status != 0);
			accel_sequence_complete(seq);
			return;
		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
			/* Waiting for an asynchronous event; the corresponding callback will
			 * call accel_process_sequence() again */
			break;
		default:
			assert(0 && "bad state");
			break;
		}
	} while (seq->state != state);

	seq->in_process_sequence = false;
}
1611 
/* Module completion callback for a task executed as part of a sequence.  Moves the
 * sequence to COMPLETE_TASK (or failed) and resumes the state machine. */
static void
accel_sequence_task_cb(void *cb_arg, int status)
{
	struct spdk_accel_sequence *seq = cb_arg;
	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);
	struct accel_io_channel *accel_ch = seq->ch;

	/* spdk_accel_task_complete() puts the task back to the task pool, but we don't want to do
	 * that if a task is part of a sequence.  Removing the task from that pool here is the
	 * easiest way to prevent this, even though it is a bit hacky.
	 */
	assert(task != NULL);
	TAILQ_REMOVE(&accel_ch->task_pool, task, link);

	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_TASK);
	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);

	if (spdk_unlikely(status != 0)) {
		SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
			    g_opcode_strings[task->op_code], seq);
		accel_sequence_set_fail(seq, status);
	}

	accel_process_sequence(seq);
}
1637 
void
spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
{
	/* Not implemented.  NOTE(review): assert() compiles out under NDEBUG, so in
	 * release builds this silently does nothing — confirm that's acceptable. */
	assert(0 && "unsupported");
}
1643 
/* "Dumb" iovec equality check: the arrays must match element for element — same base
 * pointers and same lengths, in the same order. */
static bool
accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
{
	uint32_t i;

	if (iovacnt != iovbcnt) {
		return false;
	}

	for (i = 0; i < iovacnt; i++) {
		if (iova[i].iov_base != iovb[i].iov_base ||
		    iova[i].iov_len != iovb[i].iov_len) {
			return false;
		}
	}

	return true;
}
1654 
/* Tries to merge 'task' with the task that follows it ('next') in order to elide a
 * copy: either a copy whose destination feeds the next operation's source, or an
 * operation whose destination is consumed by a subsequent copy.  Merged-away tasks are
 * moved to the completed list, so their step callbacks still fire when the sequence
 * finishes. */
static void
accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
			   struct spdk_accel_task **next_task)
{
	struct spdk_accel_task *next = *next_task;

	switch (task->op_code) {
	case ACCEL_OPC_COPY:
		/* We only allow changing src of operations that actually have a src, e.g. we never
		 * do it for fill.  Theoretically, it is possible, but we'd have to be careful to
		 * change the src of the operation after fill (which in turn could also be a fill).
		 * So, for the sake of simplicity, skip this type of operations for now.
		 */
		if (next->op_code != ACCEL_OPC_DECOMPRESS &&
		    next->op_code != ACCEL_OPC_COPY &&
		    next->op_code != ACCEL_OPC_ENCRYPT &&
		    next->op_code != ACCEL_OPC_DECRYPT) {
			break;
		}
		if (task->dst_domain != next->src_domain) {
			break;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			break;
		}
		/* The copy's output is exactly the next op's input: have the next op read
		 * straight from the copy's source and drop the copy */
		next->s.iovs = task->s.iovs;
		next->s.iovcnt = task->s.iovcnt;
		next->src_domain = task->src_domain;
		TAILQ_REMOVE(&seq->tasks, task, seq_link);
		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
		break;
	case ACCEL_OPC_DECOMPRESS:
	case ACCEL_OPC_FILL:
	case ACCEL_OPC_ENCRYPT:
	case ACCEL_OPC_DECRYPT:
		/* We can only merge tasks when one of them is a copy */
		if (next->op_code != ACCEL_OPC_COPY) {
			break;
		}
		if (task->dst_domain != next->src_domain) {
			break;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			break;
		}
		/* Have this op write straight into the copy's destination and drop the copy */
		task->d.iovs = next->d.iovs;
		task->d.iovcnt = next->d.iovcnt;
		task->dst_domain = next->dst_domain;
		/* We're removing next_task from the tasks queue, so we need to update its pointer,
		 * so that the TAILQ_FOREACH_SAFE() loop below works correctly */
		*next_task = TAILQ_NEXT(next, seq_link);
		TAILQ_REMOVE(&seq->tasks, next, seq_link);
		TAILQ_INSERT_TAIL(&seq->completed, next, seq_link);
		break;
	default:
		assert(0 && "bad opcode");
		break;
	}
}
1716 
1717 int
1718 spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
1719 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
1720 {
1721 	struct spdk_accel_task *task, *next;
1722 
1723 	/* Try to remove any copy operations if possible */
1724 	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
1725 		if (next == NULL) {
1726 			break;
1727 		}
1728 		accel_sequence_merge_tasks(seq, task, &next);
1729 	}
1730 
1731 	seq->cb_fn = cb_fn;
1732 	seq->cb_arg = cb_arg;
1733 
1734 	accel_process_sequence(seq);
1735 
1736 	return 0;
1737 }
1738 
1739 void
1740 spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
1741 {
1742 	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
1743 	struct spdk_accel_task *task;
1744 
1745 	assert(TAILQ_EMPTY(&seq->completed));
1746 	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);
1747 
1748 	while (!TAILQ_EMPTY(&tasks)) {
1749 		task = TAILQ_FIRST(&tasks);
1750 		TAILQ_REMOVE(&tasks, task, seq_link);
1751 		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
1752 	}
1753 }
1754 
/* Aborts a sequence: completes all tasks (their step callbacks still fire) without
 * executing them and releases the sequence.  A NULL sequence is a no-op. */
void
spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
{
	if (seq == NULL) {
		return;
	}

	accel_sequence_complete_tasks(seq);
	accel_sequence_put(seq);
}
1765 
1766 struct spdk_memory_domain *
1767 spdk_accel_get_memory_domain(void)
1768 {
1769 	return g_accel_domain;
1770 }
1771 
1772 static struct spdk_accel_module_if *
1773 _module_find_by_name(const char *name)
1774 {
1775 	struct spdk_accel_module_if *accel_module = NULL;
1776 
1777 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
1778 		if (strcmp(name, accel_module->name) == 0) {
1779 			break;
1780 		}
1781 	}
1782 
1783 	return accel_module;
1784 }
1785 
1786 static inline struct spdk_accel_crypto_key *
1787 _accel_crypto_key_get(const char *name)
1788 {
1789 	struct spdk_accel_crypto_key *key;
1790 
1791 	assert(spdk_spin_held(&g_keyring_spin));
1792 
1793 	TAILQ_FOREACH(key, &g_keyring, link) {
1794 		if (strcmp(name, key->param.key_name) == 0) {
1795 			return key;
1796 		}
1797 	}
1798 
1799 	return NULL;
1800 }
1801 
1802 static void
1803 accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
1804 {
1805 	if (key->param.hex_key) {
1806 		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
1807 		free(key->param.hex_key);
1808 	}
1809 	if (key->param.hex_key2) {
1810 		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
1811 		free(key->param.hex_key2);
1812 	}
1813 	free(key->param.key_name);
1814 	free(key->param.cipher);
1815 	if (key->key) {
1816 		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
1817 		free(key->key);
1818 	}
1819 	if (key->key2) {
1820 		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
1821 		free(key->key2);
1822 	}
1823 	free(key);
1824 }
1825 
1826 static void
1827 accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
1828 {
1829 	assert(key->module_if);
1830 	assert(key->module_if->crypto_key_deinit);
1831 
1832 	key->module_if->crypto_key_deinit(key);
1833 	accel_crypto_key_free_mem(key);
1834 }
1835 
1836 /*
1837  * This function mitigates a timing side channel which could be caused by using strcmp()
1838  * Please refer to chapter "Mitigating Information Leakage Based on Variable Timing" in
1839  * the article [1] for more details
1840  * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
1841  */
static bool
accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
{
	size_t i;
	/* volatile keeps the compiler from eliding the accumulation */
	volatile size_t x = k1_len ^ k2_len;

	/* Bitwise '&' (not '&&') avoids a data-dependent short-circuit branch; the loop
	 * runs over the common prefix and folds every byte difference into x */
	for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
		x |= k1[i] ^ k2[i];
	}

	return x == 0;
}
1854 
/* Creates a crypto key from the user-supplied parameters and registers it in the
 * global keyring.  The hex-encoded key(s) are length-checked, duplicated and
 * unhexlified into raw key material, which is then handed to the crypto-capable
 * module's crypto_key_init().
 *
 * Returns 0 on success; -EINVAL on invalid/oversized/identical keys; -ENOMEM on
 * allocation failure; -ENOENT/-ENOTSUP when no crypto-capable module is assigned;
 * -EEXIST when a key with the same name is already registered.  On any failure all
 * partially-allocated key memory is scrubbed and freed. */
int
spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
{
	struct spdk_accel_module_if *module;
	struct spdk_accel_crypto_key *key;
	size_t hex_key_size, hex_key2_size;
	int rc;

	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
		return -EINVAL;
	}

	if (g_modules_opc[ACCEL_OPC_ENCRYPT].module != g_modules_opc[ACCEL_OPC_DECRYPT].module) {
		/* hardly ever possible, but let's check and warn the user */
		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
	}
	module = g_modules_opc[ACCEL_OPC_ENCRYPT].module;

	if (!module) {
		SPDK_ERRLOG("No accel module found assigned for crypto operation\n");
		return -ENOENT;
	}
	if (!module->crypto_key_init) {
		SPDK_ERRLOG("Accel module \"%s\" doesn't support crypto operations\n", module->name);
		return -ENOTSUP;
	}

	key = calloc(1, sizeof(*key));
	if (!key) {
		return -ENOMEM;
	}

	key->param.key_name = strdup(param->key_name);
	if (!key->param.key_name) {
		rc = -ENOMEM;
		goto error;
	}

	key->param.cipher = strdup(param->cipher);
	if (!key->param.cipher) {
		rc = -ENOMEM;
		goto error;
	}

	/* strnlen() returning the max bound means the string isn't terminated within it */
	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		rc = -EINVAL;
		goto error;
	}
	key->param.hex_key = strdup(param->hex_key);
	if (!key->param.hex_key) {
		rc = -ENOMEM;
		goto error;
	}

	/* Two hex characters encode one raw key byte */
	key->key_size = hex_key_size / 2;
	key->key = spdk_unhexlify(key->param.hex_key);
	if (!key->key) {
		SPDK_ERRLOG("Failed to unhexlify key1\n");
		rc = -EINVAL;
		goto error;
	}

	if (param->hex_key2) {
		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
			rc = -EINVAL;
			goto error;
		}
		key->param.hex_key2 = strdup(param->hex_key2);
		if (!key->param.hex_key2) {
			rc = -ENOMEM;
			goto error;
		}

		key->key2_size = hex_key2_size / 2;
		key->key2 = spdk_unhexlify(key->param.hex_key2);
		if (!key->key2) {
			SPDK_ERRLOG("Failed to unhexlify key2\n");
			rc = -EINVAL;
			goto error;
		}

		/* AES-XTS with identical tweak and data keys degrades its security */
		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
			SPDK_ERRLOG("Identical keys are not secure\n");
			rc = -EINVAL;
			goto error;
		}
	}

	key->module_if = module;

	/* Name-uniqueness check and module init both happen under the keyring lock, so
	 * concurrent creators can't register duplicate names */
	spdk_spin_lock(&g_keyring_spin);
	if (_accel_crypto_key_get(param->key_name)) {
		rc = -EEXIST;
	} else {
		rc = module->crypto_key_init(key);
		if (!rc) {
			TAILQ_INSERT_TAIL(&g_keyring, key, link);
		}
	}
	spdk_spin_unlock(&g_keyring_spin);

	if (rc) {
		goto error;
	}

	return 0;

error:
	accel_crypto_key_free_mem(key);
	return rc;
}
1970 
1971 int
1972 spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
1973 {
1974 	if (!key || !key->module_if) {
1975 		return -EINVAL;
1976 	}
1977 
1978 	spdk_spin_lock(&g_keyring_spin);
1979 	if (!_accel_crypto_key_get(key->param.key_name)) {
1980 		spdk_spin_unlock(&g_keyring_spin);
1981 		return -ENOENT;
1982 	}
1983 	TAILQ_REMOVE(&g_keyring, key, link);
1984 	spdk_spin_unlock(&g_keyring_spin);
1985 
1986 	accel_crypto_key_destroy_unsafe(key);
1987 
1988 	return 0;
1989 }
1990 
1991 struct spdk_accel_crypto_key *
1992 spdk_accel_crypto_key_get(const char *name)
1993 {
1994 	struct spdk_accel_crypto_key *key;
1995 
1996 	spdk_spin_lock(&g_keyring_spin);
1997 	key = _accel_crypto_key_get(name);
1998 	spdk_spin_unlock(&g_keyring_spin);
1999 
2000 	return key;
2001 }
2002 
2003 /* Helper function when accel modules register with the framework. */
2004 void
2005 spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
2006 {
2007 	if (_module_find_by_name(accel_module->name)) {
2008 		SPDK_NOTICELOG("Accel module %s already registered\n", accel_module->name);
2009 		assert(false);
2010 		return;
2011 	}
2012 
2013 	/* Make sure that the software module is at the head of the list, this
2014 	 * will assure that all opcodes are later assigned to software first and
2015 	 * then updated to HW modules as they are registered.
2016 	 */
2017 	if (strcmp(accel_module->name, "software") == 0) {
2018 		TAILQ_INSERT_HEAD(&spdk_accel_module_list, accel_module, tailq);
2019 	} else {
2020 		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
2021 	}
2022 
2023 	if (accel_module->get_ctx_size && accel_module->get_ctx_size() > g_max_accel_module_size) {
2024 		g_max_accel_module_size = accel_module->get_ctx_size();
2025 	}
2026 }
2027 
2028 /* Framework level channel create callback. */
2029 static int
2030 accel_create_channel(void *io_device, void *ctx_buf)
2031 {
2032 	struct accel_io_channel	*accel_ch = ctx_buf;
2033 	struct spdk_accel_task *accel_task;
2034 	struct spdk_accel_sequence *seq;
2035 	struct accel_buffer *buf;
2036 	uint8_t *task_mem;
2037 	int i = 0, j, rc;
2038 
2039 	accel_ch->task_pool_base = calloc(MAX_TASKS_PER_CHANNEL, g_max_accel_module_size);
2040 	if (accel_ch->task_pool_base == NULL) {
2041 		return -ENOMEM;
2042 	}
2043 
2044 	accel_ch->seq_pool_base = calloc(MAX_TASKS_PER_CHANNEL, sizeof(struct spdk_accel_sequence));
2045 	if (accel_ch->seq_pool_base == NULL) {
2046 		goto err;
2047 	}
2048 
2049 	accel_ch->buf_pool_base = calloc(MAX_TASKS_PER_CHANNEL, sizeof(struct accel_buffer));
2050 	if (accel_ch->buf_pool_base == NULL) {
2051 		goto err;
2052 	}
2053 
2054 	TAILQ_INIT(&accel_ch->task_pool);
2055 	TAILQ_INIT(&accel_ch->seq_pool);
2056 	TAILQ_INIT(&accel_ch->buf_pool);
2057 	task_mem = accel_ch->task_pool_base;
2058 	for (i = 0 ; i < MAX_TASKS_PER_CHANNEL; i++) {
2059 		accel_task = (struct spdk_accel_task *)task_mem;
2060 		seq = &accel_ch->seq_pool_base[i];
2061 		buf = &accel_ch->buf_pool_base[i];
2062 		TAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
2063 		TAILQ_INSERT_TAIL(&accel_ch->seq_pool, seq, link);
2064 		TAILQ_INSERT_TAIL(&accel_ch->buf_pool, buf, link);
2065 		task_mem += g_max_accel_module_size;
2066 	}
2067 
2068 	/* Assign modules and get IO channels for each */
2069 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
2070 		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
2071 		/* This can happen if idxd runs out of channels. */
2072 		if (accel_ch->module_ch[i] == NULL) {
2073 			goto err;
2074 		}
2075 	}
2076 
2077 	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", ACCEL_SMALL_CACHE_SIZE,
2078 				     ACCEL_LARGE_CACHE_SIZE);
2079 	if (rc != 0) {
2080 		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
2081 		goto err;
2082 	}
2083 
2084 	return 0;
2085 err:
2086 	for (j = 0; j < i; j++) {
2087 		spdk_put_io_channel(accel_ch->module_ch[j]);
2088 	}
2089 	free(accel_ch->task_pool_base);
2090 	free(accel_ch->seq_pool_base);
2091 	free(accel_ch->buf_pool_base);
2092 	return -ENOMEM;
2093 }
2094 
2095 /* Framework level channel destroy callback. */
2096 static void
2097 accel_destroy_channel(void *io_device, void *ctx_buf)
2098 {
2099 	struct accel_io_channel	*accel_ch = ctx_buf;
2100 	int i;
2101 
2102 	spdk_iobuf_channel_fini(&accel_ch->iobuf);
2103 
2104 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
2105 		assert(accel_ch->module_ch[i] != NULL);
2106 		spdk_put_io_channel(accel_ch->module_ch[i]);
2107 		accel_ch->module_ch[i] = NULL;
2108 	}
2109 
2110 	free(accel_ch->task_pool_base);
2111 	free(accel_ch->seq_pool_base);
2112 	free(accel_ch->buf_pool_base);
2113 }
2114 
2115 struct spdk_io_channel *
2116 spdk_accel_get_io_channel(void)
2117 {
2118 	return spdk_get_io_channel(&spdk_accel_module_list);
2119 }
2120 
2121 static void
2122 accel_module_initialize(void)
2123 {
2124 	struct spdk_accel_module_if *accel_module;
2125 
2126 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2127 		accel_module->module_init();
2128 	}
2129 }
2130 
2131 static void
2132 accel_module_init_opcode(enum accel_opcode opcode)
2133 {
2134 	struct accel_module *module = &g_modules_opc[opcode];
2135 	struct spdk_accel_module_if *module_if = module->module;
2136 
2137 	if (module_if->get_memory_domains != NULL) {
2138 		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
2139 	}
2140 }
2141 
2142 int
2143 spdk_accel_initialize(void)
2144 {
2145 	enum accel_opcode op;
2146 	struct spdk_accel_module_if *accel_module = NULL;
2147 	int rc;
2148 
2149 	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
2150 				       "SPDK_ACCEL_DMA_DEVICE");
2151 	if (rc != 0) {
2152 		SPDK_ERRLOG("Failed to create accel memory domain\n");
2153 		return rc;
2154 	}
2155 
2156 	spdk_spin_init(&g_keyring_spin);
2157 
2158 	g_modules_started = true;
2159 	accel_module_initialize();
2160 
2161 	/* Create our priority global map of opcodes to modules, we populate starting
2162 	 * with the software module (guaranteed to be first on the list) and then
2163 	 * updating opcodes with HW modules that have been initialized.
2164 	 * NOTE: all opcodes must be supported by software in the event that no HW
2165 	 * modules are initialized to support the operation.
2166 	 */
2167 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2168 		for (op = 0; op < ACCEL_OPC_LAST; op++) {
2169 			if (accel_module->supports_opcode(op)) {
2170 				g_modules_opc[op].module = accel_module;
2171 				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
2172 			}
2173 		}
2174 	}
2175 
2176 	/* Now lets check for overrides and apply all that exist */
2177 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
2178 		if (g_modules_opc_override[op] != NULL) {
2179 			accel_module = _module_find_by_name(g_modules_opc_override[op]);
2180 			if (accel_module == NULL) {
2181 				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
2182 				rc = -EINVAL;
2183 				goto error;
2184 			}
2185 			if (accel_module->supports_opcode(op) == false) {
2186 				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
2187 				rc = -EINVAL;
2188 				goto error;
2189 			}
2190 			g_modules_opc[op].module = accel_module;
2191 		}
2192 	}
2193 
2194 	if (g_modules_opc[ACCEL_OPC_ENCRYPT].module != g_modules_opc[ACCEL_OPC_DECRYPT].module) {
2195 		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations");
2196 		rc = -EINVAL;
2197 		goto error;
2198 	}
2199 
2200 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
2201 		assert(g_modules_opc[op].module != NULL);
2202 		accel_module_init_opcode(op);
2203 	}
2204 
2205 	rc = spdk_iobuf_register_module("accel");
2206 	if (rc != 0) {
2207 		SPDK_ERRLOG("Failed to register accel iobuf module\n");
2208 		goto error;
2209 	}
2210 
2211 	/*
2212 	 * We need a unique identifier for the accel framework, so use the
2213 	 * spdk_accel_module_list address for this purpose.
2214 	 */
2215 	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
2216 				sizeof(struct accel_io_channel), "accel");
2217 
2218 	return 0;
2219 error:
2220 	spdk_memory_domain_destroy(g_accel_domain);
2221 
2222 	return rc;
2223 }
2224 
2225 static void
2226 accel_module_finish_cb(void)
2227 {
2228 	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;
2229 
2230 	spdk_memory_domain_destroy(g_accel_domain);
2231 
2232 	cb_fn(g_fini_cb_arg);
2233 	g_fini_cb_fn = NULL;
2234 	g_fini_cb_arg = NULL;
2235 }
2236 
/* Emit one "accel_assign_opc" RPC config entry mapping an opcode name to
 * the module that overrides it. */
static void
accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
			   const char *module_str)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_assign_opc");

	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "opname", opc_str);
	spdk_json_write_named_string(w, "module", module_str);
	spdk_json_write_object_end(w);	/* params */

	spdk_json_write_object_end(w);	/* RPC entry */
}
2249 
2250 static void
2251 __accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
2252 {
2253 	spdk_json_write_named_string(w, "name", key->param.key_name);
2254 	spdk_json_write_named_string(w, "cipher", key->param.cipher);
2255 	spdk_json_write_named_string(w, "key", key->param.hex_key);
2256 	if (key->param.hex_key2) {
2257 		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
2258 	}
2259 }
2260 
/* Dump a single key's parameters wrapped in their own JSON object. */
void
_accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
}
2268 
/* Emit a full "accel_crypto_key_create" RPC entry that would recreate this
 * key when the saved configuration is loaded. */
static void
_accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
				    struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");

	spdk_json_write_named_object_begin(w, "params");
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);	/* params */

	spdk_json_write_object_end(w);	/* RPC entry */
}
2280 
2281 static void
2282 _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
2283 {
2284 	struct spdk_accel_crypto_key *key;
2285 
2286 	spdk_spin_lock(&g_keyring_spin);
2287 	TAILQ_FOREACH(key, &g_keyring, link) {
2288 		if (full_dump) {
2289 			_accel_crypto_key_write_config_json(w, key);
2290 		} else {
2291 			_accel_crypto_key_dump_param(w, key);
2292 		}
2293 	}
2294 	spdk_spin_unlock(&g_keyring_spin);
2295 }
2296 
/* Dump only key parameters (no RPC method wrappers) for every key. */
void
_accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
{
	_accel_crypto_keys_write_config_json(w, false);
}
2302 
2303 void
2304 spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
2305 {
2306 	struct spdk_accel_module_if *accel_module;
2307 	int i;
2308 
2309 	/*
2310 	 * The accel fw has no config, there may be some in
2311 	 * the modules though.
2312 	 */
2313 	spdk_json_write_array_begin(w);
2314 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2315 		if (accel_module->write_config_json) {
2316 			accel_module->write_config_json(w);
2317 		}
2318 	}
2319 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
2320 		if (g_modules_opc_override[i]) {
2321 			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
2322 		}
2323 	}
2324 
2325 	_accel_crypto_keys_write_config_json(w, true);
2326 
2327 	spdk_json_write_array_end(w);
2328 }
2329 
/* Advance module teardown by one step.  This function is called repeatedly
 * (first from spdk_accel_finish(), then again as each module completes its
 * fini); g_accel_module tracks which module is being finished across calls.
 * Once the list is exhausted, the keyring lock is destroyed and the final
 * completion callback fires. */
void
spdk_accel_module_finish(void)
{
	/* First call starts at the list head; subsequent calls advance. */
	if (!g_accel_module) {
		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
	} else {
		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
	}

	if (!g_accel_module) {
		/* All modules finished - complete framework teardown. */
		spdk_spin_destroy(&g_keyring_spin);
		accel_module_finish_cb();
		return;
	}

	if (g_accel_module->module_fini) {
		/* Run module_fini from the thread's message loop rather than
		 * deep in this call stack; the module is expected to call
		 * spdk_accel_module_finish() again when done. */
		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
	} else {
		spdk_accel_module_finish();
	}
}
2351 
2352 void
2353 spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
2354 {
2355 	struct spdk_accel_crypto_key *key, *key_tmp;
2356 	enum accel_opcode op;
2357 
2358 	assert(cb_fn != NULL);
2359 
2360 	g_fini_cb_fn = cb_fn;
2361 	g_fini_cb_arg = cb_arg;
2362 
2363 	spdk_spin_lock(&g_keyring_spin);
2364 	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
2365 		accel_crypto_key_destroy_unsafe(key);
2366 	}
2367 	spdk_spin_unlock(&g_keyring_spin);
2368 
2369 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
2370 		if (g_modules_opc_override[op] != NULL) {
2371 			free(g_modules_opc_override[op]);
2372 			g_modules_opc_override[op] = NULL;
2373 		}
2374 		g_modules_opc[op].module = NULL;
2375 	}
2376 
2377 	spdk_io_device_unregister(&spdk_accel_module_list, NULL);
2378 	spdk_accel_module_finish();
2379 }
2380 
2381 static struct spdk_accel_driver *
2382 accel_find_driver(const char *name)
2383 {
2384 	struct spdk_accel_driver *driver;
2385 
2386 	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
2387 		if (strcmp(driver->name, name) == 0) {
2388 			return driver;
2389 		}
2390 	}
2391 
2392 	return NULL;
2393 }
2394 
2395 int
2396 spdk_accel_set_driver(const char *name)
2397 {
2398 	struct spdk_accel_driver *driver;
2399 
2400 	driver = accel_find_driver(name);
2401 	if (driver == NULL) {
2402 		SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
2403 		return -ENODEV;
2404 	}
2405 
2406 	g_accel_driver = driver;
2407 
2408 	return 0;
2409 }
2410 
2411 void
2412 spdk_accel_driver_register(struct spdk_accel_driver *driver)
2413 {
2414 	if (accel_find_driver(driver->name)) {
2415 		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
2416 		assert(0);
2417 		return;
2418 	}
2419 
2420 	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
2421 }
2422 
/* Register the "accel" log component used by SPDK_DEBUGLOG(accel, ...) above. */
SPDK_LOG_REGISTER_COMPONENT(accel)
2424