/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2020 Intel Corporation.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/accel_module.h"

#include "accel_internal.h"

#include "spdk/dma.h"
#include "spdk/env.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/thread.h"
#include "spdk/json.h"
#include "spdk/crc32.h"
#include "spdk/util.h"
#include "spdk/hexlify.h"

/* Accelerator Framework: The following provides a top level
 * generic API for the accelerator functions defined here. Modules,
 * such as the one in /module/accel/ioat, supply the implementation
 * with the exception of the pure software implementation contained
 * later in this file.
 */

#define ALIGN_4K			0x1000
#define MAX_TASKS_PER_CHANNEL		0x800
#define ACCEL_SMALL_CACHE_SIZE		128
#define ACCEL_LARGE_CACHE_SIZE		16
/* Set MSB, so we don't return NULL pointers as buffers */
#define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
#define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)
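
/*
 * Illustrative sketch (not part of the framework): how the opaque buffer
 * pointers handed out by spdk_accel_get_buf() encode an offset.  Such a
 * pointer is ACCEL_BUFFER_BASE plus an offset into the (not yet allocated)
 * data buffer; masking with ACCEL_BUFFER_OFFSET_MASK recovers that offset
 * once real memory is bound (see accel_update_virt_iov() below).  The helper
 * name is hypothetical.
 */
static inline uint64_t __attribute__((unused))
example_accel_buf_offset(const void *opaque)
{
	return (uintptr_t)opaque & ACCEL_BUFFER_OFFSET_MASK;
}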

struct accel_module {
	struct spdk_accel_module_if	*module;
	bool				supports_memory_domains;
};

/* Largest context size for all accel modules */
static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);

static struct spdk_accel_module_if *g_accel_module = NULL;
static spdk_accel_fini_cb g_fini_cb_fn = NULL;
static void *g_fini_cb_arg = NULL;
static bool g_modules_started = false;
static struct spdk_memory_domain *g_accel_domain;

/* Global list of registered accelerator modules */
static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);

/* Crypto keyring */
static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
static struct spdk_spinlock g_keyring_spin;

/* Global array mapping capabilities to modules */
static struct accel_module g_modules_opc[ACCEL_OPC_LAST] = {};
static char *g_modules_opc_override[ACCEL_OPC_LAST] = {};
TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
static struct spdk_accel_driver *g_accel_driver;
static struct spdk_accel_opts g_opts = {
	.small_cache_size = ACCEL_SMALL_CACHE_SIZE,
	.large_cache_size = ACCEL_LARGE_CACHE_SIZE,
	.task_count = MAX_TASKS_PER_CHANNEL,
	.sequence_count = MAX_TASKS_PER_CHANNEL,
	.buf_count = MAX_TASKS_PER_CHANNEL,
};
static struct accel_stats g_stats;
static struct spdk_spinlock g_stats_lock;

static const char *g_opcode_strings[ACCEL_OPC_LAST] = {
	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
	"compress", "decompress", "encrypt", "decrypt", "xor"
};

enum accel_sequence_state {
	ACCEL_SEQUENCE_STATE_INIT,
	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_PULL_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
	ACCEL_SEQUENCE_STATE_EXEC_TASK,
	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
	ACCEL_SEQUENCE_STATE_NEXT_TASK,
	ACCEL_SEQUENCE_STATE_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_DRIVER_EXEC,
	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE,
	ACCEL_SEQUENCE_STATE_ERROR,
	ACCEL_SEQUENCE_STATE_MAX,
};

static const char *g_seq_states[]
__attribute__((unused)) = {
	[ACCEL_SEQUENCE_STATE_INIT] = "init",
	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC] = "driver-exec",
	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK] = "driver-await-task",
	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE] = "driver-complete",
	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
	[ACCEL_SEQUENCE_STATE_MAX] = "",
};

#define ACCEL_SEQUENCE_STATE_STRING(s) \
	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
	 ? g_seq_states[s] : "unknown")

struct accel_buffer {
	struct spdk_accel_sequence	*seq;
	void				*buf;
	uint64_t			len;
	struct spdk_iobuf_entry		iobuf;
	spdk_accel_sequence_get_buf_cb	cb_fn;
	void				*cb_ctx;
	TAILQ_ENTRY(accel_buffer)	link;
};

struct accel_io_channel {
	struct spdk_io_channel			*module_ch[ACCEL_OPC_LAST];
	void					*task_pool_base;
	struct spdk_accel_sequence		*seq_pool_base;
	struct accel_buffer			*buf_pool_base;
	TAILQ_HEAD(, spdk_accel_task)		task_pool;
	TAILQ_HEAD(, spdk_accel_sequence)	seq_pool;
	TAILQ_HEAD(, accel_buffer)		buf_pool;
	struct spdk_iobuf_channel		iobuf;
	struct accel_stats			stats;
};

TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);

struct spdk_accel_sequence {
	struct accel_io_channel			*ch;
	struct accel_sequence_tasks		tasks;
	struct accel_sequence_tasks		completed;
	TAILQ_HEAD(, accel_buffer)		bounce_bufs;
	enum accel_sequence_state		state;
	int					status;
	bool					in_process_sequence;
	spdk_accel_completion_cb		cb_fn;
	void					*cb_arg;
	TAILQ_ENTRY(spdk_accel_sequence)	link;
};

#define accel_update_stats(ch, event) (ch)->stats.event++
#define accel_update_task_stats(ch, task, event) \
	accel_update_stats(ch, operations[(task)->op_code].event)

static inline void
accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
{
	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
	seq->state = state;
}

static void
accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
{
	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
	assert(status != 0);
	seq->status = status;
}

int
spdk_accel_get_opc_module_name(enum accel_opcode opcode, const char **module_name)
{
	if (opcode >= ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	if (g_modules_opc[opcode].module) {
		*module_name = g_modules_opc[opcode].module->name;
	} else {
		return -ENOENT;
	}

	return 0;
}

void
_accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
{
	struct spdk_accel_module_if *accel_module;
	enum accel_opcode opcode;
	int j = 0;

	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (opcode = 0; opcode < ACCEL_OPC_LAST; opcode++) {
			if (accel_module->supports_opcode(opcode)) {
				info->ops[j] = opcode;
				j++;
			}
		}
		info->name = accel_module->name;
		info->num_ops = j;
		fn(info);
		j = 0;
	}
}

int
_accel_get_opc_name(enum accel_opcode opcode, const char **opcode_name)
{
	int rc = 0;

	if (opcode < ACCEL_OPC_LAST) {
		*opcode_name = g_opcode_strings[opcode];
	} else {
		/* invalid opcode */
		rc = -EINVAL;
	}

	return rc;
}

int
spdk_accel_assign_opc(enum accel_opcode opcode, const char *name)
{
	if (g_modules_started == true) {
		/* we don't allow re-assignment once things have started */
		return -EINVAL;
	}

	if (opcode >= ACCEL_OPC_LAST) {
		/* invalid opcode */
		return -EINVAL;
	}

	/* module selection will be validated after the framework starts. */
	g_modules_opc_override[opcode] = strdup(name);

	return 0;
}

void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
	struct accel_io_channel *accel_ch = accel_task->accel_ch;
	spdk_accel_completion_cb	cb_fn = accel_task->cb_fn;
	void				*cb_arg = accel_task->cb_arg;

	/* Put the accel_task back on the pool's list first, so that the pool is not
	 * exhausted if the user's callback (cb_fn) recursively allocates another
	 * accel_task.
	 */
	TAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link);

	accel_update_task_stats(accel_ch, accel_task, executed);
	if (spdk_unlikely(status != 0)) {
		accel_update_task_stats(accel_ch, accel_task, failed);
	}

	cb_fn(cb_arg, status);
}
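
/*
 * Illustrative sketch (not part of the framework): because the task is
 * returned to the pool *before* cb_fn runs, a completion callback may
 * immediately submit another operation on the same channel without
 * exhausting the pool.  The context struct and names below are hypothetical.
 */
struct example_chain_ctx {
	struct spdk_io_channel	*ch;
	void			*src;
	void			*dst;
	uint64_t		len;
	bool			done;
};

static void __attribute__((unused))
example_chain_cb(void *cb_arg, int status)
{
	struct example_chain_ctx *ctx = cb_arg;

	if (status != 0 || ctx->done) {
		return;
	}

	/* Safe to re-submit here: the completed task is already back in the
	 * channel's task_pool. */
	ctx->done = true;
	spdk_accel_submit_copy(ctx->ch, ctx->dst, ctx->src, ctx->len, 0,
			       example_chain_cb, ctx);
}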

inline static struct spdk_accel_task *
_get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *accel_task;

	accel_task = TAILQ_FIRST(&accel_ch->task_pool);
	if (accel_task == NULL) {
		return NULL;
	}

	TAILQ_REMOVE(&accel_ch->task_pool, accel_task, link);
	accel_task->link.tqe_next = NULL;
	accel_task->link.tqe_prev = NULL;

	accel_task->cb_fn = cb_fn;
	accel_task->cb_arg = cb_arg;
	accel_task->accel_ch = accel_ch;
	accel_task->bounce.s.orig_iovs = NULL;
	accel_task->bounce.d.orig_iovs = NULL;

	return accel_task;
}

static inline int
accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
{
	struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
	struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
	int rc;

	rc = module->submit_tasks(module_ch, task);
	if (spdk_unlikely(rc != 0)) {
		accel_update_task_stats(accel_ch, task, failed);
	}

	return rc;
}

/* Accel framework public API for copy function */
int
spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
		       uint64_t nbytes, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->op_code = ACCEL_OPC_COPY;
	accel_task->flags = flags;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
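
/*
 * Usage sketch (illustrative, not part of the framework): submitting a copy
 * and reacting to -ENOMEM.  Assumes the caller holds a channel obtained via
 * spdk_accel_get_io_channel(); the wrapper name is hypothetical.
 */
static void __attribute__((unused))
example_submit_copy(struct spdk_io_channel *ch, void *dst, void *src, uint64_t len,
		    spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = spdk_accel_submit_copy(ch, dst, src, len, 0, cb_fn, cb_arg);
	if (rc == -ENOMEM) {
		/* Task pool exhausted; real code would queue the request and
		 * retry after an outstanding operation completes. */
	} else if (rc != 0) {
		SPDK_ERRLOG("copy submission failed: %d\n", rc);
	}
}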

/* Accel framework public API for dual cast copy function */
int
spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
			   void *dst2, void *src, uint64_t nbytes, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST2];
	accel_task->d.iovs[0].iov_base = dst1;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->d2.iovs[0].iov_base = dst2;
	accel_task->d2.iovs[0].iov_len = nbytes;
	accel_task->d2.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_DUALCAST;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for compare function */
int
spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			  void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC2];
	accel_task->s.iovs[0].iov_base = src1;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->s2.iovs[0].iov_base = src2;
	accel_task->s2.iovs[0].iov_len = nbytes;
	accel_task->s2.iovcnt = 1;
	accel_task->op_code = ACCEL_OPC_COMPARE;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for fill function */
int
spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
		       uint8_t fill, uint64_t nbytes, int flags,
		       spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_FILL;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for CRC-32C function */
int
spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
			 void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for chained CRC-32C function */
int
spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (iov == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iovcnt should not be zero\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	accel_task->s.iovs = iov;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->op_code = ACCEL_OPC_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
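
/*
 * Usage sketch (illustrative): computing CRC-32C over a scattered payload.
 * The iovec array must remain valid until the completion callback fires, so a
 * real caller would embed it in a per-request context; the names here are
 * hypothetical.
 */
struct example_crc_ctx {
	struct iovec	iovs[2];
	uint32_t	crc;
};

static int __attribute__((unused))
example_submit_crc32cv(struct spdk_io_channel *ch, struct example_crc_ctx *ctx,
		       void *hdr, size_t hdr_len, void *payload, size_t payload_len,
		       spdk_accel_completion_cb cb_fn)
{
	ctx->iovs[0].iov_base = hdr;
	ctx->iovs[0].iov_len = hdr_len;
	ctx->iovs[1].iov_base = payload;
	ctx->iovs[1].iov_len = payload_len;

	/* A seed of 0 starts a fresh CRC computation */
	return spdk_accel_submit_crc32cv(ch, &ctx->crc, ctx->iovs, 2, 0, cb_fn, ctx);
}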

/* Accel framework public API for copy with CRC-32C function */
int
spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
			      int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs[0].iov_base = src;
	accel_task->s.iovs[0].iov_len = nbytes;
	accel_task->s.iovcnt = 1;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

/* Accel framework public API for chained copy + CRC-32C function */
int
spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
			       uint32_t seed, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;
	uint64_t nbytes;
	uint32_t i;

	if (src_iovs == NULL) {
		SPDK_ERRLOG("iov should not be NULL\n");
		return -EINVAL;
	}

	if (!iov_cnt) {
		SPDK_ERRLOG("iovcnt should not be zero\n");
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		SPDK_ERRLOG("no memory\n");
		assert(0);
		return -ENOMEM;
	}

	nbytes = 0;
	for (i = 0; i < iov_cnt; i++) {
		nbytes += src_iovs[i].iov_len;
	}

	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = iov_cnt;
	accel_task->crc_dst = crc_dst;
	accel_task->seed = seed;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size, int flags,
			   spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_COMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
			     uint32_t *output_size, int flags, spdk_accel_completion_cb cb_fn,
			     void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->output_size = output_size;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_DECOMPRESS;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_ENCRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
		return -EINVAL;
	}

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->crypto_key = key;
	accel_task->s.iovs = src_iovs;
	accel_task->s.iovcnt = src_iovcnt;
	accel_task->d.iovs = dst_iovs;
	accel_task->d.iovcnt = dst_iovcnt;
	accel_task->iv = iv;
	accel_task->block_size = block_size;
	accel_task->flags = flags;
	accel_task->op_code = ACCEL_OPC_DECRYPT;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}

int
spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *accel_task;

	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
	if (accel_task == NULL) {
		return -ENOMEM;
	}

	accel_task->nsrcs.srcs = sources;
	accel_task->nsrcs.cnt = nsrcs;
	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	accel_task->d.iovs[0].iov_base = dst;
	accel_task->d.iovs[0].iov_len = nbytes;
	accel_task->d.iovcnt = 1;
	accel_task->op_code = ACCEL_OPC_XOR;
	accel_task->src_domain = NULL;
	accel_task->dst_domain = NULL;
	accel_task->step_cb_fn = NULL;

	return accel_submit_task(accel_ch, accel_task);
}
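
/*
 * Usage sketch (illustrative): XOR is typically used for RAID-style parity.
 * The sources array must stay valid until completion; the wrapper name is
 * hypothetical.
 */
static int __attribute__((unused))
example_submit_parity(struct spdk_io_channel *ch, void *parity, void **data_bufs,
		      uint32_t nbufs, uint64_t len, spdk_accel_completion_cb cb_fn,
		      void *cb_arg)
{
	/* parity receives the XOR of all nbufs source buffers */
	return spdk_accel_submit_xor(ch, parity, data_bufs, nbufs, len, cb_fn, cb_arg);
}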

static inline struct accel_buffer *
accel_get_buf(struct accel_io_channel *ch, uint64_t len)
{
	struct accel_buffer *buf;

	buf = TAILQ_FIRST(&ch->buf_pool);
	if (spdk_unlikely(buf == NULL)) {
		return NULL;
	}

	TAILQ_REMOVE(&ch->buf_pool, buf, link);
	buf->len = len;
	buf->buf = NULL;
	buf->seq = NULL;
	buf->cb_fn = NULL;

	return buf;
}

static inline void
accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
{
	if (buf->buf != NULL) {
		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
	}

	TAILQ_INSERT_HEAD(&ch->buf_pool, buf, link);
}

static inline struct spdk_accel_sequence *
accel_sequence_get(struct accel_io_channel *ch)
{
	struct spdk_accel_sequence *seq;

	seq = TAILQ_FIRST(&ch->seq_pool);
	if (seq == NULL) {
		return NULL;
	}

	TAILQ_REMOVE(&ch->seq_pool, seq, link);

	TAILQ_INIT(&seq->tasks);
	TAILQ_INIT(&seq->completed);
	TAILQ_INIT(&seq->bounce_bufs);

	seq->ch = ch;
	seq->status = 0;
	seq->state = ACCEL_SEQUENCE_STATE_INIT;
	seq->in_process_sequence = false;

	return seq;
}

static inline void
accel_sequence_put(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *ch = seq->ch;
	struct accel_buffer *buf;

	while (!TAILQ_EMPTY(&seq->bounce_bufs)) {
		buf = TAILQ_FIRST(&seq->bounce_bufs);
		TAILQ_REMOVE(&seq->bounce_bufs, buf, link);
		accel_put_buf(seq->ch, buf);
	}

	assert(TAILQ_EMPTY(&seq->tasks));
	assert(TAILQ_EMPTY(&seq->completed));
	seq->ch = NULL;

	TAILQ_INSERT_HEAD(&ch->seq_pool, seq, link);
}

static void accel_sequence_task_cb(void *cb_arg, int status);

static inline struct spdk_accel_task *
accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
			spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *task;

	task = _get_task(ch, accel_sequence_task_cb, seq);
	if (task == NULL) {
		return task;
	}

	task->step_cb_fn = cb_fn;
	task->step_cb_arg = cb_arg;

	return task;
}

int
spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
		       struct iovec *src_iovs, uint32_t src_iovcnt,
		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->flags = flags;
	task->op_code = ACCEL_OPC_COPY;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
		       void *buf, uint64_t len,
		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	memset(&task->fill_pattern, pattern, sizeof(uint64_t));

	task->d.iovs = &task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
	task->d.iovs[0].iov_base = buf;
	task->d.iovs[0].iov_len = len;
	task->d.iovcnt = 1;
	task->src_domain = NULL;
	task->dst_domain = domain;
	task->dst_domain_ctx = domain_ctx;
	task->flags = flags;
	task->op_code = ACCEL_OPC_FILL;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}
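
/*
 * Usage sketch (illustrative): building a two-step sequence (fill, then copy)
 * and executing it.  spdk_accel_sequence_finish() and
 * spdk_accel_sequence_abort() are declared in spdk/accel.h; the iovecs must
 * outlive the sequence, and the names here are hypothetical.
 */
static void __attribute__((unused))
example_fill_then_copy(struct spdk_io_channel *ch, struct iovec *scratch_iov,
		       struct iovec *dst_iov, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_sequence *seq = NULL;
	int rc;

	/* Step 1: fill the scratch buffer with a pattern */
	rc = spdk_accel_append_fill(&seq, ch, scratch_iov->iov_base, scratch_iov->iov_len,
				    NULL, NULL, 0xa5, 0, NULL, NULL);
	if (rc == 0) {
		/* Step 2: copy the filled data to its final destination.  The
		 * framework may later merge both steps into a single fill (see
		 * accel_sequence_merge_tasks() below). */
		rc = spdk_accel_append_copy(&seq, ch, dst_iov, 1, NULL, NULL,
					    scratch_iov, 1, NULL, NULL, 0, NULL, NULL);
	}
	if (rc != 0) {
		if (seq != NULL) {
			spdk_accel_sequence_abort(seq);
		}
		cb_fn(cb_arg, rc);
		return;
	}

	spdk_accel_sequence_finish(seq, cb_fn, cb_arg);
}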

int
spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			     struct iovec *dst_iovs, size_t dst_iovcnt,
			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			     struct iovec *src_iovs, size_t src_iovcnt,
			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			     int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	/* TODO: support output_size for chaining */
	task->output_size = NULL;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->flags = flags;
	task->op_code = ACCEL_OPC_DECOMPRESS;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			  struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key ||
			  !block_size)) {
		return -EINVAL;
	}

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->crypto_key = key;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->iv = iv;
	task->block_size = block_size;
	task->flags = flags;
	task->op_code = ACCEL_OPC_ENCRYPT;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
			  struct spdk_accel_crypto_key *key,
			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
			  struct iovec *src_iovs, uint32_t src_iovcnt,
			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			  uint64_t iv, uint32_t block_size, int flags,
			  spdk_accel_step_cb cb_fn, void *cb_arg)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct spdk_accel_task *task;
	struct spdk_accel_sequence *seq = *pseq;

	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key ||
			  !block_size)) {
		return -EINVAL;
	}

	if (seq == NULL) {
		seq = accel_sequence_get(accel_ch);
		if (spdk_unlikely(seq == NULL)) {
			return -ENOMEM;
		}
	}

	assert(seq->ch == accel_ch);
	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
	if (spdk_unlikely(task == NULL)) {
		if (*pseq == NULL) {
			accel_sequence_put(seq);
		}

		return -ENOMEM;
	}

	task->crypto_key = key;
	task->src_domain = src_domain;
	task->src_domain_ctx = src_domain_ctx;
	task->s.iovs = src_iovs;
	task->s.iovcnt = src_iovcnt;
	task->dst_domain = dst_domain;
	task->dst_domain_ctx = dst_domain_ctx;
	task->d.iovs = dst_iovs;
	task->d.iovcnt = dst_iovcnt;
	task->iv = iv;
	task->block_size = block_size;
	task->flags = flags;
	task->op_code = ACCEL_OPC_DECRYPT;

	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
	*pseq = seq;

	return 0;
}

int
spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
		   struct spdk_memory_domain **domain, void **domain_ctx)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_buffer *accel_buf;

	accel_buf = accel_get_buf(accel_ch, len);
	if (spdk_unlikely(accel_buf == NULL)) {
		return -ENOMEM;
	}

	/* We always return the same pointer and identify the buffers through domain_ctx */
	*buf = ACCEL_BUFFER_BASE;
	*domain_ctx = accel_buf;
	*domain = g_accel_domain;

	return 0;
}

void
spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
		   struct spdk_memory_domain *domain, void *domain_ctx)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
	struct accel_buffer *accel_buf = domain_ctx;

	assert(domain == g_accel_domain);
	assert(buf == ACCEL_BUFFER_BASE);

	accel_put_buf(accel_ch, accel_buf);
}
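
/*
 * Usage sketch (illustrative): using an opaque accel buffer as the
 * intermediate destination of a sequence step.  The buffer is identified by
 * the (domain, domain_ctx) pair and only backed by real memory once the
 * sequence executes.  Error handling (returning the buffer on failure) is
 * elided and the names are hypothetical.
 */
static int __attribute__((unused))
example_decompress_into_accel_buf(struct spdk_accel_sequence **seq, struct spdk_io_channel *ch,
				  struct iovec *comp_iovs, uint32_t comp_iovcnt,
				  struct iovec *scratch_iov, uint64_t len)
{
	struct spdk_memory_domain *domain;
	void *buf, *domain_ctx;
	int rc;

	rc = spdk_accel_get_buf(ch, len, &buf, &domain, &domain_ctx);
	if (rc != 0) {
		return rc;
	}

	/* The opaque pointer is used in an iovec like regular memory, as long
	 * as the accel domain and context accompany it */
	scratch_iov->iov_base = buf;
	scratch_iov->iov_len = len;

	return spdk_accel_append_decompress(seq, ch, scratch_iov, 1, domain, domain_ctx,
					    comp_iovs, comp_iovcnt, NULL, NULL, 0,
					    NULL, NULL);
}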

static void
accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
{
	struct spdk_accel_task *task;
	struct accel_io_channel *ch = seq->ch;
	spdk_accel_step_cb cb_fn;
	void *cb_arg;

	while (!TAILQ_EMPTY(&seq->completed)) {
		task = TAILQ_FIRST(&seq->completed);
		TAILQ_REMOVE(&seq->completed, task, seq_link);
		cb_fn = task->step_cb_fn;
		cb_arg = task->step_cb_arg;
		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
		if (cb_fn != NULL) {
			cb_fn(cb_arg);
		}
	}

	while (!TAILQ_EMPTY(&seq->tasks)) {
		task = TAILQ_FIRST(&seq->tasks);
		TAILQ_REMOVE(&seq->tasks, task, seq_link);
		cb_fn = task->step_cb_fn;
		cb_arg = task->step_cb_arg;
		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
		if (cb_fn != NULL) {
			cb_fn(cb_arg);
		}
	}
}

static void
accel_sequence_complete(struct spdk_accel_sequence *seq)
{
	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, seq->status);

	accel_update_stats(seq->ch, sequence_executed);
	if (spdk_unlikely(seq->status != 0)) {
		accel_update_stats(seq->ch, sequence_failed);
	}

	/* First, notify all users who appended operations to this sequence */
	accel_sequence_complete_tasks(seq);

	/* Then, notify the user who finished the sequence */
	seq->cb_fn(seq->cb_arg, seq->status);

	accel_sequence_put(seq);
}

static void
accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
{
	uintptr_t offset;

	offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
	assert(offset < accel_buf->len);

	diov->iov_base = (char *)accel_buf->buf + offset;
	diov->iov_len = siov->iov_len;
}

static void
accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
{
	struct spdk_accel_task *task;
	struct iovec *iov;

	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
	 * in a sequence that were using it.
	 */
	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
			iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
			assert(task->s.iovcnt == 1);
			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
			task->src_domain = NULL;
			task->s.iovs = iov;
		}
		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
			iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
			assert(task->d.iovcnt == 1);
			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
			task->dst_domain = NULL;
			task->d.iovs = iov;
		}
	}
}

static void accel_process_sequence(struct spdk_accel_sequence *seq);

static void
accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static bool
accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
			 spdk_iobuf_get_cb cb_fn)
{
	struct accel_io_channel *ch = seq->ch;

	assert(buf->buf == NULL);
	assert(buf->seq == NULL);

	buf->seq = seq;
	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
	if (buf->buf == NULL) {
		return false;
	}

	return true;
}

static bool
accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
	 * NULL */
	if (task->src_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
	}

	if (task->dst_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
	}

	return true;
}

static void
accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
}

bool
spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
			      struct spdk_memory_domain *domain, void *domain_ctx,
			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
{
	struct accel_buffer *accel_buf = domain_ctx;

	assert(domain == g_accel_domain);
	accel_buf->cb_fn = cb_fn;
	accel_buf->cb_ctx = cb_ctx;

	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
		return false;
	}

	accel_sequence_set_virtbuf(seq, accel_buf);

	return true;
}

struct spdk_accel_task *
spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
{
	return TAILQ_FIRST(&seq->tasks);
}

struct spdk_accel_task *
spdk_accel_sequence_next_task(struct spdk_accel_task *task)
{
	return TAILQ_NEXT(task, seq_link);
}

static inline uint64_t
accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
{
	uint64_t result = 0;
	uint32_t i;

	for (i = 0; i < iovcnt; ++i) {
		result += iovs[i].iov_len;
	}

	return result;
}

static inline void
accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
			struct accel_buffer *buf)
{
	bounce->orig_iovs = *iovs;
	bounce->orig_iovcnt = *iovcnt;
	bounce->orig_domain = *domain;
	bounce->orig_domain_ctx = *domain_ctx;
	bounce->iov.iov_base = buf->buf;
	bounce->iov.iov_len = buf->len;

	*iovs = &bounce->iov;
	*iovcnt = 1;
	*domain = NULL;
}

static void
accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
				&task->src_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static void
accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
				&task->dst_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}

static int
accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_buffer *buf;

	if (task->src_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->src_domain != g_accel_domain);

		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
		if (buf == NULL) {
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt,
					&task->src_domain, &task->src_domain_ctx, buf);
	}

	if (task->dst_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->dst_domain != g_accel_domain);

		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
		if (buf == NULL) {
			/* The src buffer will be released when a sequence is completed */
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt,
					&task->dst_domain, &task->dst_domain_ctx, buf);
	}

	return 0;
}

static void
accel_task_pull_data_cb(void *ctx, int status)
{
	struct spdk_accel_sequence *seq = ctx;

	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
	if (spdk_likely(status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
	} else {
		accel_sequence_set_fail(seq, status);
	}

	accel_process_sequence(seq);
}

static void
accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	assert(task->bounce.s.orig_iovs != NULL);
	assert(task->bounce.s.orig_domain != NULL);
	assert(task->bounce.s.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_pull_data(task->bounce.s.orig_domain,
					  task->bounce.s.orig_domain_ctx,
					  task->bounce.s.orig_iovs, task->bounce.s.orig_iovcnt,
					  task->s.iovs, task->s.iovcnt,
					  accel_task_pull_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->bounce.s.orig_domain), rc);
		accel_sequence_set_fail(seq, rc);
	}
}

static void
accel_task_push_data_cb(void *ctx, int status)
{
	struct spdk_accel_sequence *seq = ctx;

	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
	if (spdk_likely(status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
	} else {
		accel_sequence_set_fail(seq, status);
	}

	accel_process_sequence(seq);
}

static void
accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	int rc;

	assert(task->bounce.d.orig_iovs != NULL);
	assert(task->bounce.d.orig_domain != NULL);
	assert(task->bounce.d.orig_domain != g_accel_domain);
	assert(!g_modules_opc[task->op_code].supports_memory_domains);

	rc = spdk_memory_domain_push_data(task->bounce.d.orig_domain,
					  task->bounce.d.orig_domain_ctx,
					  task->bounce.d.orig_iovs, task->bounce.d.orig_iovcnt,
					  task->d.iovs, task->d.iovcnt,
					  accel_task_push_data_cb, seq);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
			    spdk_memory_domain_get_dma_device_id(task->bounce.d.orig_domain), rc);
		accel_sequence_set_fail(seq, rc);
	}
}

static void
accel_process_sequence(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *accel_ch = seq->ch;
	struct spdk_accel_task *task;
	enum accel_sequence_state state;
	int rc;

	/* Prevent recursive calls to this function */
	if (spdk_unlikely(seq->in_process_sequence)) {
		return;
	}
	seq->in_process_sequence = true;

	task = TAILQ_FIRST(&seq->tasks);
	assert(task != NULL);

	do {
		state = seq->state;
		switch (state) {
		case ACCEL_SEQUENCE_STATE_INIT:
			if (g_accel_driver != NULL) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC);
				break;
			}
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
			if (!accel_sequence_check_virtbuf(seq, task)) {
				/* We couldn't allocate a buffer, wait until one is available */
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
			/* If a module supports memory domains, we don't need to allocate bounce
			 * buffers */
			if (g_modules_opc[task->op_code].supports_memory_domains) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
			rc = accel_sequence_check_bouncebuf(seq, task);
			if (rc != 0) {
				/* We couldn't allocate a buffer, wait until one is available */
				if (rc == -EAGAIN) {
					break;
				}
				accel_sequence_set_fail(seq, rc);
				break;
			}
			if (task->bounce.s.orig_iovs != NULL) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
				      g_opcode_strings[task->op_code], seq);

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
			rc = accel_submit_task(accel_ch, task);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
					    g_opcode_strings[task->op_code], seq);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_PULL_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
			accel_task_pull_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
			if (task->bounce.d.orig_iovs != NULL) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
			break;
		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
			accel_task_push_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
			TAILQ_REMOVE(&seq->tasks, task, seq_link);
			TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
			/* Check if there are any remaining tasks */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC:
			assert(!TAILQ_EMPTY(&seq->tasks));

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK);
			rc = g_accel_driver->execute_sequence(seq);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
					    seq, g_accel_driver->name);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE:
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			/* We don't want to execute the next task through the driver, so we
			 * explicitly omit the INIT state here */
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
			break;
		case ACCEL_SEQUENCE_STATE_ERROR:
			/* Immediately return here to make sure we don't touch the sequence
			 * after it's completed */
			assert(seq->status != 0);
			accel_sequence_complete(seq);
			return;
		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK:
			break;
		default:
			assert(0 && "bad state");
			break;
		}
	} while (seq->state != state);

	seq->in_process_sequence = false;
}

static void
accel_sequence_task_cb(void *cb_arg, int status)
{
	struct spdk_accel_sequence *seq = cb_arg;
	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);
	struct accel_io_channel *accel_ch = seq->ch;

	/* spdk_accel_task_complete() puts the task back to the task pool, but we don't want to do
	 * that if a task is part of a sequence.  Removing the task from that pool here is the
	 * easiest way to prevent this, even though it is a bit hacky.
	 */
	assert(task != NULL);
	TAILQ_REMOVE(&accel_ch->task_pool, task, link);

	switch (seq->state) {
	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
				    g_opcode_strings[task->op_code], seq);
			accel_sequence_set_fail(seq, status);
		}

		accel_process_sequence(seq);
		break;
	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK:
		assert(g_accel_driver != NULL);
		/* Immediately remove the task from the outstanding list to make sure the next call
		 * to spdk_accel_sequence_first_task() doesn't return it */
		TAILQ_REMOVE(&seq->tasks, task, seq_link);
		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);

		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
				    g_accel_driver->name);
			/* Update status without using accel_sequence_set_fail() to avoid changing
			 * seq's state to ERROR until driver calls spdk_accel_sequence_continue() */
			seq->status = status;
		}
		break;
	default:
		assert(0 && "bad state");
		break;
	}
}

void
spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
{
	assert(g_accel_driver != NULL);
	assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK);

	if (spdk_likely(seq->status == 0)) {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE);
	} else {
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
	}

	accel_process_sequence(seq);
}
1714 
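/*
 * Driver contract sketch (hypothetical driver code, for illustration only;
 * real drivers typically complete tasks asynchronously).  Inside
 * execute_sequence() a driver walks the tasks, completes each one with
 * spdk_accel_task_complete(), and hands control back to the framework via
 * spdk_accel_sequence_continue():
 *
 *   static int
 *   mydriver_execute_sequence(struct spdk_accel_sequence *seq)
 *   {
 *           struct spdk_accel_task *task;
 *
 *           while ((task = spdk_accel_sequence_first_task(seq)) != NULL) {
 *                   ...execute the task...
 *                   spdk_accel_task_complete(task, 0);
 *           }
 *
 *           spdk_accel_sequence_continue(seq);
 *           return 0;
 *   }
 */
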
1715 static bool
1716 accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
1717 {
1718 	/* For now, just do a dumb check that the iovec arrays are exactly the same */
1719 	if (iovacnt != iovbcnt) {
1720 		return false;
1721 	}
1722 
1723 	return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
1724 }
1725 
1726 static bool
1727 accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next)
1728 {
1729 	if (task->dst_domain != next->src_domain) {
1730 		return false;
1731 	}
1732 
1733 	if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
1734 				next->s.iovs, next->s.iovcnt)) {
1735 		return false;
1736 	}
1737 
1738 	task->d.iovs = next->d.iovs;
1739 	task->d.iovcnt = next->d.iovcnt;
1740 	task->dst_domain = next->dst_domain;
1741 	task->dst_domain_ctx = next->dst_domain_ctx;
1742 
1743 	return true;
1744 }
1745 
1746 static void
1747 accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
1748 			   struct spdk_accel_task **next_task)
1749 {
1750 	struct spdk_accel_task *next = *next_task;
1751 
1752 	switch (task->op_code) {
1753 	case ACCEL_OPC_COPY:
1754 		/* We only allow changing the src of operations that actually have a src, e.g. we
1755 		 * never do it for fill.  Theoretically it is possible, but we'd have to be careful
1756 		 * to change the src of the operation following the fill (which in turn could also
1757 		 * be a fill).  So, for the sake of simplicity, skip these operations for now.
1758 		 */
1759 		if (next->op_code != ACCEL_OPC_DECOMPRESS &&
1760 		    next->op_code != ACCEL_OPC_COPY &&
1761 		    next->op_code != ACCEL_OPC_ENCRYPT &&
1762 		    next->op_code != ACCEL_OPC_DECRYPT) {
1763 			break;
1764 		}
1765 		if (task->dst_domain != next->src_domain) {
1766 			break;
1767 		}
1768 		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
1769 					next->s.iovs, next->s.iovcnt)) {
1770 			break;
1771 		}
1772 		next->s.iovs = task->s.iovs;
1773 		next->s.iovcnt = task->s.iovcnt;
1774 		next->src_domain = task->src_domain;
1775 		next->src_domain_ctx = task->src_domain_ctx;
1776 		TAILQ_REMOVE(&seq->tasks, task, seq_link);
1777 		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
1778 		break;
1779 	case ACCEL_OPC_DECOMPRESS:
1780 	case ACCEL_OPC_FILL:
1781 	case ACCEL_OPC_ENCRYPT:
1782 	case ACCEL_OPC_DECRYPT:
1783 		/* We can only merge tasks when one of them is a copy */
1784 		if (next->op_code != ACCEL_OPC_COPY) {
1785 			break;
1786 		}
1787 		if (!accel_task_set_dstbuf(task, next)) {
1788 			break;
1789 		}
1790 		/* We're removing next_task from the tasks queue, so we need to update its pointer
1791 		 * so that the TAILQ_FOREACH_SAFE() loop in the caller works correctly */
1792 		*next_task = TAILQ_NEXT(next, seq_link);
1793 		TAILQ_REMOVE(&seq->tasks, next, seq_link);
1794 		TAILQ_INSERT_TAIL(&seq->completed, next, seq_link);
1795 		break;
1796 	default:
1797 		assert(0 && "bad opcode");
1798 		break;
1799 	}
1800 }
1801 
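/*
 * Illustration of the merge above (buffer names are hypothetical): given the
 * appended sequence
 *
 *   decompress: tmp_buf    -> bounce_buf
 *   copy:       bounce_buf -> user_buf
 *
 * accel_task_set_dstbuf() redirects the decompress task to write straight to
 * user_buf, and the copy task is retired to seq->completed, so only one
 * operation is actually executed.  Symmetrically, a copy followed by a
 * decompress/copy/encrypt/decrypt is elided by replacing the following task's
 * source with the copy's source.
 */
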
1802 void
1803 spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
1804 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
1805 {
1806 	struct spdk_accel_task *task, *next;
1807 
1808 	/* Try to remove any copy operations if possible */
1809 	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
1810 		if (next == NULL) {
1811 			break;
1812 		}
1813 		accel_sequence_merge_tasks(seq, task, &next);
1814 	}
1815 
1816 	seq->cb_fn = cb_fn;
1817 	seq->cb_arg = cb_arg;
1818 
1819 	accel_process_sequence(seq);
1820 }
1821 
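/*
 * Minimal usage sketch.  It assumes the append APIs declared in spdk/accel.h
 * (e.g. spdk_accel_append_fill()/spdk_accel_append_copy()); their exact
 * parameter lists vary between SPDK versions, so treat this as illustrative:
 *
 *   struct spdk_accel_sequence *seq = NULL;
 *   struct spdk_io_channel *ch = spdk_accel_get_io_channel();
 *
 *   ...append operations; nothing is executed yet...
 *   spdk_accel_append_fill(&seq, ch, buf, len, NULL, NULL, 0xa5, 0, NULL, NULL);
 *   spdk_accel_append_copy(&seq, ch, dst_iovs, dst_iovcnt, NULL, NULL,
 *                          src_iovs, src_iovcnt, NULL, NULL, 0, NULL, NULL);
 *
 *   ...copy elision (see accel_sequence_merge_tasks() above) and execution
 *   ...happen here
 *   spdk_accel_sequence_finish(seq, finish_cb, cb_arg);
 */
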
1822 void
1823 spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
1824 {
1825 	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
1826 	struct spdk_accel_task *task;
1827 
1828 	assert(TAILQ_EMPTY(&seq->completed));
1829 	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);
1830 
1831 	while (!TAILQ_EMPTY(&tasks)) {
1832 		task = TAILQ_FIRST(&tasks);
1833 		TAILQ_REMOVE(&tasks, task, seq_link);
1834 		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
1835 	}
1836 }
1837 
1838 void
1839 spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
1840 {
1841 	if (seq == NULL) {
1842 		return;
1843 	}
1844 
1845 	accel_sequence_complete_tasks(seq);
1846 	accel_sequence_put(seq);
1847 }
1848 
1849 struct spdk_memory_domain *
1850 spdk_accel_get_memory_domain(void)
1851 {
1852 	return g_accel_domain;
1853 }
1854 
1855 static struct spdk_accel_module_if *
1856 _module_find_by_name(const char *name)
1857 {
1858 	struct spdk_accel_module_if *accel_module = NULL;
1859 
1860 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
1861 		if (strcmp(name, accel_module->name) == 0) {
1862 			break;
1863 		}
1864 	}
1865 
1866 	return accel_module;
1867 }
1868 
1869 static inline struct spdk_accel_crypto_key *
1870 _accel_crypto_key_get(const char *name)
1871 {
1872 	struct spdk_accel_crypto_key *key;
1873 
1874 	assert(spdk_spin_held(&g_keyring_spin));
1875 
1876 	TAILQ_FOREACH(key, &g_keyring, link) {
1877 		if (strcmp(name, key->param.key_name) == 0) {
1878 			return key;
1879 		}
1880 	}
1881 
1882 	return NULL;
1883 }
1884 
1885 static void
1886 accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
1887 {
1888 	if (key->param.hex_key) {
1889 		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
1890 		free(key->param.hex_key);
1891 	}
1892 	if (key->param.hex_key2) {
1893 		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
1894 		free(key->param.hex_key2);
1895 	}
1896 	free(key->param.key_name);
1897 	free(key->param.cipher);
1898 	if (key->key) {
1899 		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
1900 		free(key->key);
1901 	}
1902 	if (key->key2) {
1903 		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
1904 		free(key->key2);
1905 	}
1906 	free(key);
1907 }
1908 
1909 static void
1910 accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
1911 {
1912 	assert(key->module_if);
1913 	assert(key->module_if->crypto_key_deinit);
1914 
1915 	key->module_if->crypto_key_deinit(key);
1916 	accel_crypto_key_free_mem(key);
1917 }
1918 
1919 /*
1920  * This function mitigates a timing side channel that could be caused by using strcmp().
1921  * Please refer to the chapter "Mitigating Information Leakage Based on Variable Timing" in
1922  * the article [1] for more details.
1923  * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
1924  */
1925 static bool
1926 accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
1927 {
1928 	size_t i;
1929 	volatile size_t x = k1_len ^ k2_len;
1930 
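	/* The bitwise '&' in the loop condition (rather than '&&') and the '|='
	 * accumulation avoid data-dependent branches, keeping the comparison
	 * constant-time. */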
1931 	for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
1932 		x |= k1[i] ^ k2[i];
1933 	}
1934 
1935 	return x == 0;
1936 }
1937 
1938 int
1939 spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
1940 {
1941 	struct spdk_accel_module_if *module;
1942 	struct spdk_accel_crypto_key *key;
1943 	size_t hex_key_size, hex_key2_size;
1944 	int rc;
1945 
1946 	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
1947 		return -EINVAL;
1948 	}
1949 
1950 	if (g_modules_opc[ACCEL_OPC_ENCRYPT].module != g_modules_opc[ACCEL_OPC_DECRYPT].module) {
1951 		/* nearly impossible, but let's check and warn the user */
1952 		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
1953 	}
1954 	module = g_modules_opc[ACCEL_OPC_ENCRYPT].module;
1955 
1956 	if (!module) {
1957 		SPDK_ERRLOG("No accel module assigned for crypto operations\n");
1958 		return -ENOENT;
1959 	}
1960 	if (!module->crypto_key_init) {
1961 		SPDK_ERRLOG("Accel module \"%s\" doesn't support crypto operations\n", module->name);
1962 		return -ENOTSUP;
1963 	}
1964 
1965 	key = calloc(1, sizeof(*key));
1966 	if (!key) {
1967 		return -ENOMEM;
1968 	}
1969 
1970 	key->param.key_name = strdup(param->key_name);
1971 	if (!key->param.key_name) {
1972 		rc = -ENOMEM;
1973 		goto error;
1974 	}
1975 
1976 	key->param.cipher = strdup(param->cipher);
1977 	if (!key->param.cipher) {
1978 		rc = -ENOMEM;
1979 		goto error;
1980 	}
1981 
1982 	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
1983 	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
1984 		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
1985 		rc = -EINVAL;
1986 		goto error;
1987 	}
1988 	key->param.hex_key = strdup(param->hex_key);
1989 	if (!key->param.hex_key) {
1990 		rc = -ENOMEM;
1991 		goto error;
1992 	}
1993 
1994 	key->key_size = hex_key_size / 2;
1995 	key->key = spdk_unhexlify(key->param.hex_key);
1996 	if (!key->key) {
1997 		SPDK_ERRLOG("Failed to unhexlify key1\n");
1998 		rc = -EINVAL;
1999 		goto error;
2000 	}
2001 
2002 	if (param->hex_key2) {
2003 		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2004 		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
2005 			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2006 			rc = -EINVAL;
2007 			goto error;
2008 		}
2009 		key->param.hex_key2 = strdup(param->hex_key2);
2010 		if (!key->param.hex_key2) {
2011 			rc = -ENOMEM;
2012 			goto error;
2013 		}
2014 
2015 		key->key2_size = hex_key2_size / 2;
2016 		key->key2 = spdk_unhexlify(key->param.hex_key2);
2017 		if (!key->key2) {
2018 			SPDK_ERRLOG("Failed to unhexlify key2\n");
2019 			rc = -EINVAL;
2020 			goto error;
2021 		}
2022 
2023 		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
2024 			SPDK_ERRLOG("Identical keys are not secure\n");
2025 			rc = -EINVAL;
2026 			goto error;
2027 		}
2028 	}
2029 
2030 	key->module_if = module;
2031 
2032 	spdk_spin_lock(&g_keyring_spin);
2033 	if (_accel_crypto_key_get(param->key_name)) {
2034 		rc = -EEXIST;
2035 	} else {
2036 		rc = module->crypto_key_init(key);
2037 		if (!rc) {
2038 			TAILQ_INSERT_TAIL(&g_keyring, key, link);
2039 		}
2040 	}
2041 	spdk_spin_unlock(&g_keyring_spin);
2042 
2043 	if (rc) {
2044 		goto error;
2045 	}
2046 
2047 	return 0;
2048 
2049 error:
2050 	accel_crypto_key_free_mem(key);
2051 	return rc;
2052 }
2053 
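/*
 * Usage sketch (field names follow struct spdk_accel_crypto_key_create_param
 * at this revision; the hex keys below are placeholders, not secure values):
 *
 *   struct spdk_accel_crypto_key_create_param param = {
 *           .cipher   = "AES_XTS",
 *           .hex_key  = "00112233445566778899aabbccddeeff",
 *           .hex_key2 = "ffeeddccbbaa99887766554433221100",
 *           .key_name = "key0",
 *   };
 *
 *   if (spdk_accel_crypto_key_create(&param) == 0) {
 *           struct spdk_accel_crypto_key *key = spdk_accel_crypto_key_get("key0");
 *           ...use key with encrypt/decrypt submissions...
 *           spdk_accel_crypto_key_destroy(key);
 *   }
 */
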
2054 int
2055 spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
2056 {
2057 	if (!key || !key->module_if) {
2058 		return -EINVAL;
2059 	}
2060 
2061 	spdk_spin_lock(&g_keyring_spin);
2062 	if (!_accel_crypto_key_get(key->param.key_name)) {
2063 		spdk_spin_unlock(&g_keyring_spin);
2064 		return -ENOENT;
2065 	}
2066 	TAILQ_REMOVE(&g_keyring, key, link);
2067 	spdk_spin_unlock(&g_keyring_spin);
2068 
2069 	accel_crypto_key_destroy_unsafe(key);
2070 
2071 	return 0;
2072 }
2073 
2074 struct spdk_accel_crypto_key *
2075 spdk_accel_crypto_key_get(const char *name)
2076 {
2077 	struct spdk_accel_crypto_key *key;
2078 
2079 	spdk_spin_lock(&g_keyring_spin);
2080 	key = _accel_crypto_key_get(name);
2081 	spdk_spin_unlock(&g_keyring_spin);
2082 
2083 	return key;
2084 }
2085 
2086 /* Helper function when accel modules register with the framework. */
2087 void
2088 spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
2089 {
2090 	if (_module_find_by_name(accel_module->name)) {
2091 		SPDK_NOTICELOG("Accel module %s already registered\n", accel_module->name);
2092 		assert(false);
2093 		return;
2094 	}
2095 
2096 	/* Make sure that the software module is at the head of the list.  This
2097 	 * ensures that all opcodes are first assigned to software and then
2098 	 * updated to HW modules as they are registered.
2099 	 */
2100 	if (strcmp(accel_module->name, "software") == 0) {
2101 		TAILQ_INSERT_HEAD(&spdk_accel_module_list, accel_module, tailq);
2102 	} else {
2103 		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
2104 	}
2105 
2106 	if (accel_module->get_ctx_size && accel_module->get_ctx_size() > g_max_accel_module_size) {
2107 		g_max_accel_module_size = accel_module->get_ctx_size();
2108 	}
2109 }
2110 
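/*
 * Modules don't usually call spdk_accel_module_list_add() directly; they use
 * the SPDK_ACCEL_MODULE_REGISTER() constructor macro from spdk/accel_module.h,
 * which invokes it at load time.  A skeletal registration might look like this
 * (callback implementations omitted, names hypothetical):
 *
 *   static struct spdk_accel_module_if g_my_module = {
 *           .module_init     = my_module_init,
 *           .get_ctx_size    = my_module_get_ctx_size,
 *           .name            = "my_module",
 *           .supports_opcode = my_module_supports_opcode,
 *           .get_io_channel  = my_module_get_io_channel,
 *           .submit_tasks    = my_module_submit_tasks,
 *   };
 *   SPDK_ACCEL_MODULE_REGISTER(my_module, &g_my_module)
 */
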
2111 /* Framework level channel create callback. */
2112 static int
2113 accel_create_channel(void *io_device, void *ctx_buf)
2114 {
2115 	struct accel_io_channel	*accel_ch = ctx_buf;
2116 	struct spdk_accel_task *accel_task;
2117 	struct spdk_accel_sequence *seq;
2118 	struct accel_buffer *buf;
2119 	uint8_t *task_mem;
2120 	uint32_t i = 0, j;
2121 	int rc;
2122 
2123 	accel_ch->task_pool_base = calloc(g_opts.task_count, g_max_accel_module_size);
2124 	if (accel_ch->task_pool_base == NULL) {
2125 		return -ENOMEM;
2126 	}
2127 
2128 	accel_ch->seq_pool_base = calloc(g_opts.sequence_count, sizeof(struct spdk_accel_sequence));
2129 	if (accel_ch->seq_pool_base == NULL) {
2130 		goto err;
2131 	}
2132 
2133 	accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer));
2134 	if (accel_ch->buf_pool_base == NULL) {
2135 		goto err;
2136 	}
2137 
2138 	TAILQ_INIT(&accel_ch->task_pool);
2139 	TAILQ_INIT(&accel_ch->seq_pool);
2140 	TAILQ_INIT(&accel_ch->buf_pool);
2141 
2142 	task_mem = accel_ch->task_pool_base;
2143 	for (i = 0; i < g_opts.task_count; i++) {
2144 		accel_task = (struct spdk_accel_task *)task_mem;
2145 		TAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
2146 		task_mem += g_max_accel_module_size;
2147 	}
2148 	for (i = 0; i < g_opts.sequence_count; i++) {
2149 		seq = &accel_ch->seq_pool_base[i];
2150 		TAILQ_INSERT_TAIL(&accel_ch->seq_pool, seq, link);
2151 	}
2152 	for (i = 0; i < g_opts.buf_count; i++) {
2153 		buf = &accel_ch->buf_pool_base[i];
2154 		TAILQ_INSERT_TAIL(&accel_ch->buf_pool, buf, link);
2155 	}
2156 
2157 	/* Assign modules and get IO channels for each */
2158 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
2159 		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
2160 		/* This can happen if idxd runs out of channels. */
2161 		if (accel_ch->module_ch[i] == NULL) {
2162 			goto err;
2163 		}
2164 	}
2165 
2166 	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size,
2167 				     g_opts.large_cache_size);
2168 	if (rc != 0) {
2169 		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
2170 		goto err;
2171 	}
2172 
2173 	return 0;
2174 err:
2175 	for (j = 0; j < i; j++) {
2176 		spdk_put_io_channel(accel_ch->module_ch[j]);
2177 	}
2178 	free(accel_ch->task_pool_base);
2179 	free(accel_ch->seq_pool_base);
2180 	free(accel_ch->buf_pool_base);
2181 
2182 	return -ENOMEM;
2183 }
2184 
2185 static void
2186 accel_add_stats(struct accel_stats *total, struct accel_stats *stats)
2187 {
2188 	int i;
2189 
2190 	total->sequence_executed += stats->sequence_executed;
2191 	total->sequence_failed += stats->sequence_failed;
2192 	for (i = 0; i < ACCEL_OPC_LAST; ++i) {
2193 		total->operations[i].executed += stats->operations[i].executed;
2194 		total->operations[i].failed += stats->operations[i].failed;
2195 	}
2196 }
2197 
2198 /* Framework level channel destroy callback. */
2199 static void
2200 accel_destroy_channel(void *io_device, void *ctx_buf)
2201 {
2202 	struct accel_io_channel	*accel_ch = ctx_buf;
2203 	int i;
2204 
2205 	spdk_iobuf_channel_fini(&accel_ch->iobuf);
2206 
2207 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
2208 		assert(accel_ch->module_ch[i] != NULL);
2209 		spdk_put_io_channel(accel_ch->module_ch[i]);
2210 		accel_ch->module_ch[i] = NULL;
2211 	}
2212 
2213 	/* Update global stats to make sure the channel's stats aren't lost after the channel is gone */
2214 	spdk_spin_lock(&g_stats_lock);
2215 	accel_add_stats(&g_stats, &accel_ch->stats);
2216 	spdk_spin_unlock(&g_stats_lock);
2217 
2218 	free(accel_ch->task_pool_base);
2219 	free(accel_ch->seq_pool_base);
2220 	free(accel_ch->buf_pool_base);
2221 }
2222 
2223 struct spdk_io_channel *
2224 spdk_accel_get_io_channel(void)
2225 {
2226 	return spdk_get_io_channel(&spdk_accel_module_list);
2227 }
2228 
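/*
 * Consumers grab a per-thread accel channel, use it for submissions and
 * sequences, and release it with the generic channel API when done:
 *
 *   struct spdk_io_channel *ch = spdk_accel_get_io_channel();
 *   ...submit operations / build sequences on ch...
 *   spdk_put_io_channel(ch);
 */
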
2229 static void
2230 accel_module_initialize(void)
2231 {
2232 	struct spdk_accel_module_if *accel_module;
2233 
2234 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2235 		accel_module->module_init();
2236 	}
2237 }
2238 
2239 static void
2240 accel_module_init_opcode(enum accel_opcode opcode)
2241 {
2242 	struct accel_module *module = &g_modules_opc[opcode];
2243 	struct spdk_accel_module_if *module_if = module->module;
2244 
2245 	if (module_if->get_memory_domains != NULL) {
2246 		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
2247 	}
2248 }
2249 
2250 int
2251 spdk_accel_initialize(void)
2252 {
2253 	enum accel_opcode op;
2254 	struct spdk_accel_module_if *accel_module = NULL;
2255 	int rc;
2256 
2257 	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
2258 				       "SPDK_ACCEL_DMA_DEVICE");
2259 	if (rc != 0) {
2260 		SPDK_ERRLOG("Failed to create accel memory domain\n");
2261 		return rc;
2262 	}
2263 
2264 	spdk_spin_init(&g_keyring_spin);
2265 	spdk_spin_init(&g_stats_lock);
2266 
2267 	g_modules_started = true;
2268 	accel_module_initialize();
2269 
2270 	/* Create our global priority map of opcodes to modules.  We populate it
2271 	 * starting with the software module (guaranteed to be first on the list)
2272 	 * and then update opcodes with HW modules that have been initialized.
2273 	 * NOTE: all opcodes must be supported by software in the event that no HW
2274 	 * modules are initialized to support the operation.
2275 	 */
2276 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2277 		for (op = 0; op < ACCEL_OPC_LAST; op++) {
2278 			if (accel_module->supports_opcode(op)) {
2279 				g_modules_opc[op].module = accel_module;
2280 				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
2281 			}
2282 		}
2283 	}
2284 
2285 	/* Now let's check for overrides and apply all that exist */
2286 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
2287 		if (g_modules_opc_override[op] != NULL) {
2288 			accel_module = _module_find_by_name(g_modules_opc_override[op]);
2289 			if (accel_module == NULL) {
2290 				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
2291 				rc = -EINVAL;
2292 				goto error;
2293 			}
2294 			if (accel_module->supports_opcode(op) == false) {
2295 				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
2296 				rc = -EINVAL;
2297 				goto error;
2298 			}
2299 			g_modules_opc[op].module = accel_module;
2300 		}
2301 	}
2302 
2303 	if (g_modules_opc[ACCEL_OPC_ENCRYPT].module != g_modules_opc[ACCEL_OPC_DECRYPT].module) {
2304 		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations\n");
2305 		rc = -EINVAL;
2306 		goto error;
2307 	}
2308 
2309 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
2310 		assert(g_modules_opc[op].module != NULL);
2311 		accel_module_init_opcode(op);
2312 	}
2313 
2314 	rc = spdk_iobuf_register_module("accel");
2315 	if (rc != 0) {
2316 		SPDK_ERRLOG("Failed to register accel iobuf module\n");
2317 		goto error;
2318 	}
2319 
2320 	/*
2321 	 * We need a unique identifier for the accel framework, so use the
2322 	 * spdk_accel_module_list address for this purpose.
2323 	 */
2324 	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
2325 				sizeof(struct accel_io_channel), "accel");
2326 
2327 	return 0;
2328 error:
2329 	spdk_memory_domain_destroy(g_accel_domain);
2330 
2331 	return rc;
2332 }
2333 
2334 static void
2335 accel_module_finish_cb(void)
2336 {
2337 	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;
2338 
2339 	spdk_memory_domain_destroy(g_accel_domain);
2340 
2341 	cb_fn(g_fini_cb_arg);
2342 	g_fini_cb_fn = NULL;
2343 	g_fini_cb_arg = NULL;
2344 }
2345 
2346 static void
2347 accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
2348 			   const char *module_str)
2349 {
2350 	spdk_json_write_object_begin(w);
2351 	spdk_json_write_named_string(w, "method", "accel_assign_opc");
2352 	spdk_json_write_named_object_begin(w, "params");
2353 	spdk_json_write_named_string(w, "opname", opc_str);
2354 	spdk_json_write_named_string(w, "module", module_str);
2355 	spdk_json_write_object_end(w);
2356 	spdk_json_write_object_end(w);
2357 }
2358 
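/*
 * For example, if the "copy" opcode had been overridden to the ioat module,
 * this would emit the following replayable RPC entry:
 *
 *   {
 *     "method": "accel_assign_opc",
 *     "params": { "opname": "copy", "module": "ioat" }
 *   }
 */
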
2359 static void
2360 __accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
2361 {
2362 	spdk_json_write_named_string(w, "name", key->param.key_name);
2363 	spdk_json_write_named_string(w, "cipher", key->param.cipher);
2364 	spdk_json_write_named_string(w, "key", key->param.hex_key);
2365 	if (key->param.hex_key2) {
2366 		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
2367 	}
2368 }
2369 
2370 void
2371 _accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
2372 {
2373 	spdk_json_write_object_begin(w);
2374 	__accel_crypto_key_dump_param(w, key);
2375 	spdk_json_write_object_end(w);
2376 }
2377 
2378 static void
2379 _accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
2380 				    struct spdk_accel_crypto_key *key)
2381 {
2382 	spdk_json_write_object_begin(w);
2383 	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
2384 	spdk_json_write_named_object_begin(w, "params");
2385 	__accel_crypto_key_dump_param(w, key);
2386 	spdk_json_write_object_end(w);
2387 	spdk_json_write_object_end(w);
2388 }
2389 
2390 static void
2391 accel_write_options(struct spdk_json_write_ctx *w)
2392 {
2393 	spdk_json_write_object_begin(w);
2394 	spdk_json_write_named_string(w, "method", "accel_set_options");
2395 	spdk_json_write_named_object_begin(w, "params");
2396 	spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size);
2397 	spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size);
2398 	spdk_json_write_named_uint32(w, "task_count", g_opts.task_count);
2399 	spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count);
2400 	spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count);
2401 	spdk_json_write_object_end(w);
2402 	spdk_json_write_object_end(w);
2403 }
2404 
2405 static void
2406 _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
2407 {
2408 	struct spdk_accel_crypto_key *key;
2409 
2410 	spdk_spin_lock(&g_keyring_spin);
2411 	TAILQ_FOREACH(key, &g_keyring, link) {
2412 		if (full_dump) {
2413 			_accel_crypto_key_write_config_json(w, key);
2414 		} else {
2415 			_accel_crypto_key_dump_param(w, key);
2416 		}
2417 	}
2418 	spdk_spin_unlock(&g_keyring_spin);
2419 }
2420 
2421 void
2422 _accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
2423 {
2424 	_accel_crypto_keys_write_config_json(w, false);
2425 }
2426 
2427 void
2428 spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
2429 {
2430 	struct spdk_accel_module_if *accel_module;
2431 	int i;
2432 
2433 	spdk_json_write_array_begin(w);
2434 	accel_write_options(w);
2435 
2436 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2437 		if (accel_module->write_config_json) {
2438 			accel_module->write_config_json(w);
2439 		}
2440 	}
2441 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
2442 		if (g_modules_opc_override[i]) {
2443 			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
2444 		}
2445 	}
2446 
2447 	_accel_crypto_keys_write_config_json(w, true);
2448 
2449 	spdk_json_write_array_end(w);
2450 }
2451 
2452 void
2453 spdk_accel_module_finish(void)
2454 {
2455 	if (!g_accel_module) {
2456 		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
2457 	} else {
2458 		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
2459 	}
2460 
2461 	if (!g_accel_module) {
2462 		spdk_spin_destroy(&g_keyring_spin);
2463 		accel_module_finish_cb();
2464 		return;
2465 	}
2466 
2467 	if (g_accel_module->module_fini) {
2468 		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
2469 	} else {
2470 		spdk_accel_module_finish();
2471 	}
2472 }
2473 
2474 void
2475 spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
2476 {
2477 	struct spdk_accel_crypto_key *key, *key_tmp;
2478 	enum accel_opcode op;
2479 
2480 	assert(cb_fn != NULL);
2481 
2482 	g_fini_cb_fn = cb_fn;
2483 	g_fini_cb_arg = cb_arg;
2484 
2485 	spdk_spin_lock(&g_keyring_spin);
2486 	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
2487 		accel_crypto_key_destroy_unsafe(key);
2488 	}
2489 	spdk_spin_unlock(&g_keyring_spin);
2490 
2491 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
2492 		if (g_modules_opc_override[op] != NULL) {
2493 			free(g_modules_opc_override[op]);
2494 			g_modules_opc_override[op] = NULL;
2495 		}
2496 		g_modules_opc[op].module = NULL;
2497 	}
2498 
2499 	spdk_io_device_unregister(&spdk_accel_module_list, NULL);
2500 	spdk_accel_module_finish();
2501 }
2502 
2503 static struct spdk_accel_driver *
2504 accel_find_driver(const char *name)
2505 {
2506 	struct spdk_accel_driver *driver;
2507 
2508 	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
2509 		if (strcmp(driver->name, name) == 0) {
2510 			return driver;
2511 		}
2512 	}
2513 
2514 	return NULL;
2515 }
2516 
2517 int
2518 spdk_accel_set_driver(const char *name)
2519 {
2520 	struct spdk_accel_driver *driver;
2521 
2522 	driver = accel_find_driver(name);
2523 	if (driver == NULL) {
2524 		SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
2525 		return -ENODEV;
2526 	}
2527 
2528 	g_accel_driver = driver;
2529 
2530 	return 0;
2531 }
2532 
2533 void
2534 spdk_accel_driver_register(struct spdk_accel_driver *driver)
2535 {
2536 	if (accel_find_driver(driver->name)) {
2537 		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
2538 		assert(0);
2539 		return;
2540 	}
2541 
2542 	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
2543 }
2544 
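/*
 * A minimal driver sketch (callback implementation omitted, names
 * hypothetical; SPDK_ACCEL_DRIVER_REGISTER() is assumed to be the constructor
 * macro from spdk/accel_module.h):
 *
 *   static struct spdk_accel_driver g_my_driver = {
 *           .name             = "mydriver",
 *           .execute_sequence = mydriver_execute_sequence,
 *   };
 *   SPDK_ACCEL_DRIVER_REGISTER(mydriver, &g_my_driver)
 *
 * Once registered, it can be selected with spdk_accel_set_driver("mydriver").
 */
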
2545 int
2546 spdk_accel_set_opts(const struct spdk_accel_opts *opts)
2547 {
2548 	if (opts->size > sizeof(*opts)) {
2549 		return -EINVAL;
2550 	}
2551 
2552 	memcpy(&g_opts, opts, opts->size);
2553 
2554 	return 0;
2555 }
2556 
2557 void
2558 spdk_accel_get_opts(struct spdk_accel_opts *opts)
2559 {
2560 	size_t size = opts->size;
2561 
2562 	assert(size <= sizeof(*opts));
2563 
2564 	memcpy(opts, &g_opts, spdk_min(sizeof(*opts), size));
2565 	opts->size = size;
2566 }
2567 
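/*
 * The size field makes these getters/setters forward and backward compatible:
 * callers set .size to the struct size they were compiled against, and only
 * that many bytes are copied in either direction.  A typical read-modify-write,
 * done before spdk_accel_initialize() so the new values take effect (sketch):
 *
 *   struct spdk_accel_opts opts = { .size = sizeof(opts) };
 *
 *   spdk_accel_get_opts(&opts);
 *   opts.task_count = 4096;
 *   spdk_accel_set_opts(&opts);
 */
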
2568 struct accel_get_stats_ctx {
2569 	struct accel_stats	stats;
2570 	accel_get_stats_cb	cb_fn;
2571 	void			*cb_arg;
2572 };
2573 
2574 static void
2575 accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
2576 {
2577 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
2578 
2579 	ctx->cb_fn(&ctx->stats, ctx->cb_arg);
2580 	free(ctx);
2581 }
2582 
2583 static void
2584 accel_get_channel_stats(struct spdk_io_channel_iter *iter)
2585 {
2586 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
2587 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
2588 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
2589 
2590 	accel_add_stats(&ctx->stats, &accel_ch->stats);
2591 	spdk_for_each_channel_continue(iter, 0);
2592 }
2593 
2594 int
2595 accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg)
2596 {
2597 	struct accel_get_stats_ctx *ctx;
2598 
2599 	ctx = calloc(1, sizeof(*ctx));
2600 	if (ctx == NULL) {
2601 		return -ENOMEM;
2602 	}
2603 
2604 	spdk_spin_lock(&g_stats_lock);
2605 	accel_add_stats(&ctx->stats, &g_stats);
2606 	spdk_spin_unlock(&g_stats_lock);
2607 
2608 	ctx->cb_fn = cb_fn;
2609 	ctx->cb_arg = cb_arg;
2610 
2611 	spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx,
2612 			      accel_get_channel_stats_done);
2613 
2614 	return 0;
2615 }
2616 
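/*
 * accel_get_stats() is internal (it backs the accel RPCs).  The callback runs
 * once the global snapshot and every channel's stats have been folded
 * together, e.g. (sketch):
 *
 *   static void
 *   dump_stats_cb(struct accel_stats *stats, void *cb_arg)
 *   {
 *           printf("sequences executed: %" PRIu64 "\n", stats->sequence_executed);
 *   }
 *
 *   accel_get_stats(dump_stats_cb, NULL);
 */
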
2617 SPDK_LOG_REGISTER_COMPONENT(accel)
2618