1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2020 Intel Corporation.
3  *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
4  *   All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_internal/accel_module.h"
10 
11 #include "accel_internal.h"
12 
13 #include "spdk/dma.h"
14 #include "spdk/env.h"
15 #include "spdk/likely.h"
16 #include "spdk/log.h"
17 #include "spdk/thread.h"
18 #include "spdk/json.h"
19 #include "spdk/crc32.h"
20 #include "spdk/util.h"
21 #include "spdk/hexlify.h"
22 
23 /* Accelerator Framework: The following provides a top-level,
24  * generic API for the accelerator functions defined here. Modules,
25  * such as the one in /module/accel/ioat, supply the implementations,
26  * with the exception of the pure software implementation contained
27  * later in this file.
28  */
29 
30 #define ALIGN_4K			0x1000
31 #define MAX_TASKS_PER_CHANNEL		0x800
32 #define ACCEL_SMALL_CACHE_SIZE		128
33 #define ACCEL_LARGE_CACHE_SIZE		16
34 /* Set MSB, so we don't return NULL pointers as buffers */
35 #define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
36 #define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)
37 
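/* Worked example (illustrative, not part of the library): a "virtual" accel
 * buffer pointer is ACCEL_BUFFER_BASE plus an offset into the buffer, so the
 * offset can be recovered by masking off the MSB:
 *
 *	void *virt = (char *)ACCEL_BUFFER_BASE + 0x100;
 *	uintptr_t offset = (uintptr_t)virt & ACCEL_BUFFER_OFFSET_MASK;
 *	assert(offset == 0x100);
 *
 * accel_update_virt_iov() below performs exactly this translation once the
 * real data buffer has been allocated.
 */
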
38 struct accel_module {
39 	struct spdk_accel_module_if	*module;
40 	bool				supports_memory_domains;
41 };
42 
43 /* Largest context size for all accel modules */
44 static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);
45 
46 static struct spdk_accel_module_if *g_accel_module = NULL;
47 static spdk_accel_fini_cb g_fini_cb_fn = NULL;
48 static void *g_fini_cb_arg = NULL;
49 static bool g_modules_started = false;
50 static struct spdk_memory_domain *g_accel_domain;
51 
52 /* Global list of registered accelerator modules */
53 static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
54 	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);
55 
56 /* Crypto keyring */
57 static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
58 static struct spdk_spinlock g_keyring_spin;
59 
60 /* Global array mapping capabilities to modules */
61 static struct accel_module g_modules_opc[ACCEL_OPC_LAST] = {};
62 static char *g_modules_opc_override[ACCEL_OPC_LAST] = {};
63 TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
64 static struct spdk_accel_driver *g_accel_driver;
65 
66 static const char *g_opcode_strings[ACCEL_OPC_LAST] = {
67 	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
68 	"compress", "decompress", "encrypt", "decrypt", "xor"
69 };
70 
71 enum accel_sequence_state {
72 	ACCEL_SEQUENCE_STATE_INIT,
73 	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
74 	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
75 	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
76 	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
77 	ACCEL_SEQUENCE_STATE_PULL_DATA,
78 	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
79 	ACCEL_SEQUENCE_STATE_EXEC_TASK,
80 	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
81 	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
82 	ACCEL_SEQUENCE_STATE_NEXT_TASK,
83 	ACCEL_SEQUENCE_STATE_PUSH_DATA,
84 	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
85 	ACCEL_SEQUENCE_STATE_DRIVER_EXEC,
86 	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK,
87 	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE,
88 	ACCEL_SEQUENCE_STATE_ERROR,
89 	ACCEL_SEQUENCE_STATE_MAX,
90 };
91 
92 static const char *g_seq_states[]
93 __attribute__((unused)) = {
94 	[ACCEL_SEQUENCE_STATE_INIT] = "init",
95 	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
96 	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
97 	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
98 	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
99 	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
100 	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
101 	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
102 	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
103 	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
104 	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
105 	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
106 	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
107 	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC] = "driver-exec",
108 	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK] = "driver-await-task",
109 	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE] = "driver-complete",
110 	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
111 	[ACCEL_SEQUENCE_STATE_MAX] = "",
112 };
113 
114 #define ACCEL_SEQUENCE_STATE_STRING(s) \
115 	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
116 	 ? g_seq_states[s] : "unknown")
117 
118 struct accel_buffer {
119 	struct spdk_accel_sequence	*seq;
120 	void				*buf;
121 	uint64_t			len;
122 	struct spdk_iobuf_entry		iobuf;
123 	spdk_accel_sequence_get_buf_cb	cb_fn;
124 	void				*cb_ctx;
125 	TAILQ_ENTRY(accel_buffer)	link;
126 };
127 
128 struct accel_io_channel {
129 	struct spdk_io_channel			*module_ch[ACCEL_OPC_LAST];
130 	void					*task_pool_base;
131 	struct spdk_accel_sequence		*seq_pool_base;
132 	struct accel_buffer			*buf_pool_base;
133 	TAILQ_HEAD(, spdk_accel_task)		task_pool;
134 	TAILQ_HEAD(, spdk_accel_sequence)	seq_pool;
135 	TAILQ_HEAD(, accel_buffer)		buf_pool;
136 	struct spdk_iobuf_channel		iobuf;
137 };
138 
139 TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);
140 
141 struct spdk_accel_sequence {
142 	struct accel_io_channel			*ch;
143 	struct accel_sequence_tasks		tasks;
144 	struct accel_sequence_tasks		completed;
145 	TAILQ_HEAD(, accel_buffer)		bounce_bufs;
146 	enum accel_sequence_state		state;
147 	int					status;
148 	bool					in_process_sequence;
149 	spdk_accel_completion_cb		cb_fn;
150 	void					*cb_arg;
151 	TAILQ_ENTRY(spdk_accel_sequence)	link;
152 };
153 
154 static inline void
155 accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
156 {
157 	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
158 		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
159 	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
160 	seq->state = state;
161 }
162 
163 static void
164 accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
165 {
166 	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
167 	assert(status != 0);
168 	seq->status = status;
169 }
170 
171 int
172 spdk_accel_get_opc_module_name(enum accel_opcode opcode, const char **module_name)
173 {
174 	if (opcode >= ACCEL_OPC_LAST) {
175 		/* invalid opcode */
176 		return -EINVAL;
177 	}
178 
179 	if (g_modules_opc[opcode].module) {
180 		*module_name = g_modules_opc[opcode].module->name;
181 	} else {
182 		return -ENOENT;
183 	}
184 
185 	return 0;
186 }
187 
188 void
189 _accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
190 {
191 	struct spdk_accel_module_if *accel_module;
192 	enum accel_opcode opcode;
193 	int j = 0;
194 
195 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
196 		for (opcode = 0; opcode < ACCEL_OPC_LAST; opcode++) {
197 			if (accel_module->supports_opcode(opcode)) {
198 				info->ops[j] = opcode;
199 				j++;
200 			}
201 		}
202 		info->name = accel_module->name;
203 		info->num_ops = j;
204 		fn(info);
205 		j = 0;
206 	}
207 }
208 
209 int
210 _accel_get_opc_name(enum accel_opcode opcode, const char **opcode_name)
211 {
212 	int rc = 0;
213 
214 	if (opcode < ACCEL_OPC_LAST) {
215 		*opcode_name = g_opcode_strings[opcode];
216 	} else {
217 		/* invalid opcode */
218 		rc = -EINVAL;
219 	}
220 
221 	return rc;
222 }
223 
224 int
225 spdk_accel_assign_opc(enum accel_opcode opcode, const char *name)
226 {
227 	if (g_modules_started == true) {
228 		/* we don't allow re-assignment once things have started */
229 		return -EINVAL;
230 	}
231 
232 	if (opcode >= ACCEL_OPC_LAST) {
233 		/* invalid opcode */
234 		return -EINVAL;
235 	}
236 
237 	/* module selection will be validated after the framework starts. */
238 	g_modules_opc_override[opcode] = strdup(name);
239 
240 	return 0;
241 }
242 
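/* Usage sketch (illustrative only; names are hypothetical): override the
 * module used for an opcode during subsystem initialization, before the
 * framework starts.  The name is assumed to match a registered module:
 *
 *	if (spdk_accel_assign_opc(ACCEL_OPC_COPY, "software") != 0) {
 *		// invalid opcode or framework already started
 *	}
 */
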
243 void
244 spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
245 {
246 	struct accel_io_channel *accel_ch = accel_task->accel_ch;
247 	spdk_accel_completion_cb	cb_fn = accel_task->cb_fn;
248 	void				*cb_arg = accel_task->cb_arg;
249 
250 	/* Put the accel_task back into the pool first, so that the pool is not
251 	 * exhausted if the user's callback function (cb_fn) recursively
252 	 * allocates another accel_task.
253 	 */
254 	TAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link);
255 
256 	cb_fn(cb_arg, status);
257 }
258 
259 static inline struct spdk_accel_task *
260 _get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
261 {
262 	struct spdk_accel_task *accel_task;
263 
264 	accel_task = TAILQ_FIRST(&accel_ch->task_pool);
265 	if (accel_task == NULL) {
266 		return NULL;
267 	}
268 
269 	TAILQ_REMOVE(&accel_ch->task_pool, accel_task, link);
270 	accel_task->link.tqe_next = NULL;
271 	accel_task->link.tqe_prev = NULL;
272 
273 	accel_task->cb_fn = cb_fn;
274 	accel_task->cb_arg = cb_arg;
275 	accel_task->accel_ch = accel_ch;
276 	accel_task->bounce.s.orig_iovs = NULL;
277 	accel_task->bounce.d.orig_iovs = NULL;
278 
279 	return accel_task;
280 }
281 
282 /* Accel framework public API for copy function */
283 int
284 spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
285 		       uint64_t nbytes, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
286 {
287 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
288 	struct spdk_accel_task *accel_task;
289 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COPY].module;
290 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COPY];
291 
292 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
293 	if (accel_task == NULL) {
294 		return -ENOMEM;
295 	}
296 
297 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
298 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
299 	accel_task->d.iovs[0].iov_base = dst;
300 	accel_task->d.iovs[0].iov_len = nbytes;
301 	accel_task->d.iovcnt = 1;
302 	accel_task->s.iovs[0].iov_base = src;
303 	accel_task->s.iovs[0].iov_len = nbytes;
304 	accel_task->s.iovcnt = 1;
305 	accel_task->op_code = ACCEL_OPC_COPY;
306 	accel_task->flags = flags;
307 	accel_task->src_domain = NULL;
308 	accel_task->dst_domain = NULL;
309 	accel_task->step_cb_fn = NULL;
310 
311 	return module->submit_tasks(module_ch, accel_task);
312 }
313 
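/* Usage sketch (illustrative only): submitting a copy from an application.
 * This assumes an accel I/O channel obtained from the public API and a
 * hypothetical caller-provided completion callback:
 *
 *	static void
 *	my_copy_done(void *cb_arg, int status)
 *	{
 *		if (status != 0) {
 *			SPDK_ERRLOG("copy failed with %d\n", status);
 *		}
 *	}
 *
 *	rc = spdk_accel_submit_copy(ch, dst, src, nbytes, 0, my_copy_done, ctx);
 *	if (rc == -ENOMEM) {
 *		// the channel's task pool is empty, retry later
 *	}
 */
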
314 /* Accel framework public API for dual cast copy function */
315 int
316 spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
317 			   void *dst2, void *src, uint64_t nbytes, int flags,
318 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
319 {
320 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
321 	struct spdk_accel_task *accel_task;
322 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_DUALCAST].module;
323 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_DUALCAST];
324 
325 	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
326 		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
327 		return -EINVAL;
328 	}
329 
330 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
331 	if (accel_task == NULL) {
332 		return -ENOMEM;
333 	}
334 
335 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
336 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
337 	accel_task->d2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST2];
338 	accel_task->d.iovs[0].iov_base = dst1;
339 	accel_task->d.iovs[0].iov_len = nbytes;
340 	accel_task->d.iovcnt = 1;
341 	accel_task->d2.iovs[0].iov_base = dst2;
342 	accel_task->d2.iovs[0].iov_len = nbytes;
343 	accel_task->d2.iovcnt = 1;
344 	accel_task->s.iovs[0].iov_base = src;
345 	accel_task->s.iovs[0].iov_len = nbytes;
346 	accel_task->s.iovcnt = 1;
347 	accel_task->flags = flags;
348 	accel_task->op_code = ACCEL_OPC_DUALCAST;
349 	accel_task->src_domain = NULL;
350 	accel_task->dst_domain = NULL;
351 	accel_task->step_cb_fn = NULL;
352 
353 	return module->submit_tasks(module_ch, accel_task);
354 }
355 
356 /* Accel framework public API for compare function */
357 int
358 spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
359 			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
360 			  void *cb_arg)
361 {
362 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
363 	struct spdk_accel_task *accel_task;
364 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COMPARE].module;
365 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COMPARE];
366 
367 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
368 	if (accel_task == NULL) {
369 		return -ENOMEM;
370 	}
371 
372 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
373 	accel_task->s2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC2];
374 	accel_task->s.iovs[0].iov_base = src1;
375 	accel_task->s.iovs[0].iov_len = nbytes;
376 	accel_task->s.iovcnt = 1;
377 	accel_task->s2.iovs[0].iov_base = src2;
378 	accel_task->s2.iovs[0].iov_len = nbytes;
379 	accel_task->s2.iovcnt = 1;
380 	accel_task->op_code = ACCEL_OPC_COMPARE;
381 	accel_task->src_domain = NULL;
382 	accel_task->dst_domain = NULL;
383 	accel_task->step_cb_fn = NULL;
384 
385 	return module->submit_tasks(module_ch, accel_task);
386 }
387 
388 /* Accel framework public API for fill function */
389 int
390 spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
391 		       uint8_t fill, uint64_t nbytes, int flags,
392 		       spdk_accel_completion_cb cb_fn, void *cb_arg)
393 {
394 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
395 	struct spdk_accel_task *accel_task;
396 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_FILL].module;
397 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_FILL];
398 
399 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
400 	if (accel_task == NULL) {
401 		return -ENOMEM;
402 	}
403 
404 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
405 	accel_task->d.iovs[0].iov_base = dst;
406 	accel_task->d.iovs[0].iov_len = nbytes;
407 	accel_task->d.iovcnt = 1;
408 	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
409 	accel_task->flags = flags;
410 	accel_task->op_code = ACCEL_OPC_FILL;
411 	accel_task->src_domain = NULL;
412 	accel_task->dst_domain = NULL;
413 	accel_task->step_cb_fn = NULL;
414 
415 	return module->submit_tasks(module_ch, accel_task);
416 }
417 
418 /* Accel framework public API for CRC-32C function */
419 int
420 spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
421 			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
422 			 void *cb_arg)
423 {
424 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
425 	struct spdk_accel_task *accel_task;
426 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_CRC32C].module;
427 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_CRC32C];
428 
429 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
430 	if (accel_task == NULL) {
431 		return -ENOMEM;
432 	}
433 
434 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
435 	accel_task->s.iovs[0].iov_base = src;
436 	accel_task->s.iovs[0].iov_len = nbytes;
437 	accel_task->s.iovcnt = 1;
438 	accel_task->crc_dst = crc_dst;
439 	accel_task->seed = seed;
440 	accel_task->op_code = ACCEL_OPC_CRC32C;
441 	accel_task->src_domain = NULL;
442 	accel_task->dst_domain = NULL;
443 	accel_task->step_cb_fn = NULL;
444 
445 	return module->submit_tasks(module_ch, accel_task);
446 }
447 
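/* Usage sketch (illustrative only): computing a CRC-32C over a flat buffer.
 * The result is stored to crc_dst when the operation completes (before the
 * hypothetical callback runs); the seed allows chaining a previous CRC
 * result into a new computation:
 *
 *	static uint32_t crc;
 *
 *	rc = spdk_accel_submit_crc32c(ch, &crc, data, seed, data_len,
 *				      my_crc_done, NULL);
 */
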
448 /* Accel framework public API for chained CRC-32C function */
449 int
450 spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
451 			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
452 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
453 {
454 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
455 	struct spdk_accel_task *accel_task;
456 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_CRC32C].module;
457 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_CRC32C];
458 
459 	if (iov == NULL) {
460 		SPDK_ERRLOG("iov should not be NULL\n");
461 		return -EINVAL;
462 	}
463 
464 	if (!iov_cnt) {
465 		SPDK_ERRLOG("iovcnt should not be zero\n");
466 		return -EINVAL;
467 	}
468 
469 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
470 	if (accel_task == NULL) {
471 		/* Running out of tasks is a normal, recoverable condition */
472 		SPDK_ERRLOG("no memory\n");
473 		return -ENOMEM;
474 	}
475 
476 	accel_task->s.iovs = iov;
477 	accel_task->s.iovcnt = iov_cnt;
478 	accel_task->crc_dst = crc_dst;
479 	accel_task->seed = seed;
480 	accel_task->op_code = ACCEL_OPC_CRC32C;
481 	accel_task->src_domain = NULL;
482 	accel_task->dst_domain = NULL;
483 	accel_task->step_cb_fn = NULL;
484 
485 	return module->submit_tasks(module_ch, accel_task);
486 }
487 
488 /* Accel framework public API for copy with CRC-32C function */
489 int
490 spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
491 			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
492 			      int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
493 {
494 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
495 	struct spdk_accel_task *accel_task;
496 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COPY_CRC32C].module;
497 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COPY_CRC32C];
498 
499 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
500 	if (accel_task == NULL) {
501 		return -ENOMEM;
502 	}
503 
504 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
505 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
506 	accel_task->d.iovs[0].iov_base = dst;
507 	accel_task->d.iovs[0].iov_len = nbytes;
508 	accel_task->d.iovcnt = 1;
509 	accel_task->s.iovs[0].iov_base = src;
510 	accel_task->s.iovs[0].iov_len = nbytes;
511 	accel_task->s.iovcnt = 1;
512 	accel_task->crc_dst = crc_dst;
513 	accel_task->seed = seed;
514 	accel_task->flags = flags;
515 	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;
516 	accel_task->src_domain = NULL;
517 	accel_task->dst_domain = NULL;
518 	accel_task->step_cb_fn = NULL;
519 
520 	return module->submit_tasks(module_ch, accel_task);
521 }
522 
523 /* Accel framework public API for chained copy + CRC-32C function */
524 int
525 spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
526 			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
527 			       uint32_t seed, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
528 {
529 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
530 	struct spdk_accel_task *accel_task;
531 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COPY_CRC32C].module;
532 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COPY_CRC32C];
533 	uint64_t nbytes;
534 	uint32_t i;
535 
536 	if (src_iovs == NULL) {
537 		SPDK_ERRLOG("iov should not be NULL\n");
538 		return -EINVAL;
539 	}
540 
541 	if (!iov_cnt) {
542 		SPDK_ERRLOG("iovcnt should not be zero\n");
543 		return -EINVAL;
544 	}
545 
546 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
547 	if (accel_task == NULL) {
548 		/* Running out of tasks is a normal, recoverable condition */
549 		SPDK_ERRLOG("no memory\n");
550 		return -ENOMEM;
551 	}
552 
553 	nbytes = 0;
554 	for (i = 0; i < iov_cnt; i++) {
555 		nbytes += src_iovs[i].iov_len;
556 	}
557 
558 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
559 	accel_task->d.iovs[0].iov_base = dst;
560 	accel_task->d.iovs[0].iov_len = nbytes;
561 	accel_task->d.iovcnt = 1;
562 	accel_task->s.iovs = src_iovs;
563 	accel_task->s.iovcnt = iov_cnt;
564 	accel_task->crc_dst = crc_dst;
565 	accel_task->seed = seed;
566 	accel_task->flags = flags;
567 	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;
568 	accel_task->src_domain = NULL;
569 	accel_task->dst_domain = NULL;
570 	accel_task->step_cb_fn = NULL;
571 
572 	return module->submit_tasks(module_ch, accel_task);
573 }
574 
575 int
576 spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
577 			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size, int flags,
578 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
579 {
580 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
581 	struct spdk_accel_task *accel_task;
582 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_COMPRESS].module;
583 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_COMPRESS];
584 
585 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
586 	if (accel_task == NULL) {
587 		return -ENOMEM;
588 	}
589 
590 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
591 	accel_task->d.iovs[0].iov_base = dst;
592 	accel_task->d.iovs[0].iov_len = nbytes;
593 	accel_task->d.iovcnt = 1;
594 	accel_task->output_size = output_size;
595 	accel_task->s.iovs = src_iovs;
596 	accel_task->s.iovcnt = src_iovcnt;
597 	accel_task->flags = flags;
598 	accel_task->op_code = ACCEL_OPC_COMPRESS;
599 	accel_task->src_domain = NULL;
600 	accel_task->dst_domain = NULL;
601 	accel_task->step_cb_fn = NULL;
602 
603 	return module->submit_tasks(module_ch, accel_task);
604 }
605 
606 int
607 spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
608 			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
609 			     uint32_t *output_size, int flags, spdk_accel_completion_cb cb_fn,
610 			     void *cb_arg)
611 {
612 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
613 	struct spdk_accel_task *accel_task;
614 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_DECOMPRESS].module;
615 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_DECOMPRESS];
616 
617 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
618 	if (accel_task == NULL) {
619 		return -ENOMEM;
620 	}
621 
622 	accel_task->output_size = output_size;
623 	accel_task->s.iovs = src_iovs;
624 	accel_task->s.iovcnt = src_iovcnt;
625 	accel_task->d.iovs = dst_iovs;
626 	accel_task->d.iovcnt = dst_iovcnt;
627 	accel_task->flags = flags;
628 	accel_task->op_code = ACCEL_OPC_DECOMPRESS;
629 	accel_task->src_domain = NULL;
630 	accel_task->dst_domain = NULL;
631 	accel_task->step_cb_fn = NULL;
632 
633 	return module->submit_tasks(module_ch, accel_task);
634 }
635 
636 int
637 spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
638 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
639 			  struct iovec *src_iovs, uint32_t src_iovcnt,
640 			  uint64_t iv, uint32_t block_size, int flags,
641 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
642 {
643 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
644 	struct spdk_accel_task *accel_task;
645 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_ENCRYPT].module;
646 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_ENCRYPT];
647 
648 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
649 		return -EINVAL;
650 	}
651 
652 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
653 	if (accel_task == NULL) {
654 		return -ENOMEM;
655 	}
656 
657 	accel_task->crypto_key = key;
658 	accel_task->s.iovs = src_iovs;
659 	accel_task->s.iovcnt = src_iovcnt;
660 	accel_task->d.iovs = dst_iovs;
661 	accel_task->d.iovcnt = dst_iovcnt;
662 	accel_task->iv = iv;
663 	accel_task->block_size = block_size;
664 	accel_task->flags = flags;
665 	accel_task->op_code = ACCEL_OPC_ENCRYPT;
666 	accel_task->src_domain = NULL;
667 	accel_task->dst_domain = NULL;
668 	accel_task->step_cb_fn = NULL;
669 
670 	return module->submit_tasks(module_ch, accel_task);
671 }
672 
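/* Usage sketch (illustrative only): encrypting a single logical block.  The
 * key is assumed to have been created earlier through the crypto key API, and
 * the iv is typically derived from the block's address (e.g. its LBA):
 *
 *	struct iovec src = { .iov_base = plaintext, .iov_len = block_size };
 *	struct iovec dst = { .iov_base = ciphertext, .iov_len = block_size };
 *
 *	rc = spdk_accel_submit_encrypt(ch, key, &dst, 1, &src, 1,
 *				       lba, block_size, 0, my_cb, NULL);
 */
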
673 int
674 spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
675 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
676 			  struct iovec *src_iovs, uint32_t src_iovcnt,
677 			  uint64_t iv, uint32_t block_size, int flags,
678 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
679 {
680 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
681 	struct spdk_accel_task *accel_task;
682 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_DECRYPT].module;
683 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_DECRYPT];
684 
685 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
686 		return -EINVAL;
687 	}
688 
689 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
690 	if (accel_task == NULL) {
691 		return -ENOMEM;
692 	}
693 
694 	accel_task->crypto_key = key;
695 	accel_task->s.iovs = src_iovs;
696 	accel_task->s.iovcnt = src_iovcnt;
697 	accel_task->d.iovs = dst_iovs;
698 	accel_task->d.iovcnt = dst_iovcnt;
699 	accel_task->iv = iv;
700 	accel_task->block_size = block_size;
701 	accel_task->flags = flags;
702 	accel_task->op_code = ACCEL_OPC_DECRYPT;
703 	accel_task->src_domain = NULL;
704 	accel_task->dst_domain = NULL;
705 	accel_task->step_cb_fn = NULL;
706 
707 	return module->submit_tasks(module_ch, accel_task);
708 }
709 
710 int
711 spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
712 		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
713 {
714 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
715 	struct spdk_accel_task *accel_task;
716 	struct spdk_accel_module_if *module = g_modules_opc[ACCEL_OPC_XOR].module;
717 	struct spdk_io_channel *module_ch = accel_ch->module_ch[ACCEL_OPC_XOR];
718 
719 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
720 	if (accel_task == NULL) {
721 		return -ENOMEM;
722 	}
723 
724 	accel_task->nsrcs.srcs = sources;
725 	accel_task->nsrcs.cnt = nsrcs;
726 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
727 	accel_task->d.iovs[0].iov_base = dst;
728 	accel_task->d.iovs[0].iov_len = nbytes;
729 	accel_task->d.iovcnt = 1;
730 	accel_task->op_code = ACCEL_OPC_XOR;
731 	accel_task->src_domain = NULL;
732 	accel_task->dst_domain = NULL;
733 	accel_task->step_cb_fn = NULL;
734 
735 	return module->submit_tasks(module_ch, accel_task);
736 }
737 
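/* Usage sketch (illustrative only): XOR-ing two equally sized source buffers
 * into dst, e.g. to compute RAID5-style parity:
 *
 *	void *sources[] = { data0, data1 };
 *
 *	rc = spdk_accel_submit_xor(ch, parity, sources, 2, strip_size,
 *				   my_cb, NULL);
 */
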
738 static inline struct accel_buffer *
739 accel_get_buf(struct accel_io_channel *ch, uint64_t len)
740 {
741 	struct accel_buffer *buf;
742 
743 	buf = TAILQ_FIRST(&ch->buf_pool);
744 	if (spdk_unlikely(buf == NULL)) {
745 		return NULL;
746 	}
747 
748 	TAILQ_REMOVE(&ch->buf_pool, buf, link);
749 	buf->len = len;
750 	buf->buf = NULL;
751 	buf->seq = NULL;
752 	buf->cb_fn = NULL;
753 
754 	return buf;
755 }
756 
757 static inline void
758 accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
759 {
760 	if (buf->buf != NULL) {
761 		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
762 	}
763 
764 	TAILQ_INSERT_HEAD(&ch->buf_pool, buf, link);
765 }
766 
767 static inline struct spdk_accel_sequence *
768 accel_sequence_get(struct accel_io_channel *ch)
769 {
770 	struct spdk_accel_sequence *seq;
771 
772 	seq = TAILQ_FIRST(&ch->seq_pool);
773 	if (seq == NULL) {
774 		return NULL;
775 	}
776 
777 	TAILQ_REMOVE(&ch->seq_pool, seq, link);
778 
779 	TAILQ_INIT(&seq->tasks);
780 	TAILQ_INIT(&seq->completed);
781 	TAILQ_INIT(&seq->bounce_bufs);
782 
783 	seq->ch = ch;
784 	seq->status = 0;
785 	seq->state = ACCEL_SEQUENCE_STATE_INIT;
786 	seq->in_process_sequence = false;
787 
788 	return seq;
789 }
790 
791 static inline void
792 accel_sequence_put(struct spdk_accel_sequence *seq)
793 {
794 	struct accel_io_channel *ch = seq->ch;
795 	struct accel_buffer *buf;
796 
797 	while (!TAILQ_EMPTY(&seq->bounce_bufs)) {
798 		buf = TAILQ_FIRST(&seq->bounce_bufs);
799 		TAILQ_REMOVE(&seq->bounce_bufs, buf, link);
800 		accel_put_buf(seq->ch, buf);
801 	}
802 
803 	assert(TAILQ_EMPTY(&seq->tasks));
804 	assert(TAILQ_EMPTY(&seq->completed));
805 	seq->ch = NULL;
806 
807 	TAILQ_INSERT_HEAD(&ch->seq_pool, seq, link);
808 }
809 
810 static void accel_sequence_task_cb(void *cb_arg, int status);
811 
812 static inline struct spdk_accel_task *
813 accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
814 			spdk_accel_step_cb cb_fn, void *cb_arg)
815 {
816 	struct spdk_accel_task *task;
817 
818 	task = _get_task(ch, accel_sequence_task_cb, seq);
819 	if (task == NULL) {
820 		return task;
821 	}
822 
823 	task->step_cb_fn = cb_fn;
824 	task->step_cb_arg = cb_arg;
825 
826 	return task;
827 }
828 
829 int
830 spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
831 		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
832 		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
833 		       struct iovec *src_iovs, uint32_t src_iovcnt,
834 		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
835 		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
836 {
837 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
838 	struct spdk_accel_task *task;
839 	struct spdk_accel_sequence *seq = *pseq;
840 
841 	if (seq == NULL) {
842 		seq = accel_sequence_get(accel_ch);
843 		if (spdk_unlikely(seq == NULL)) {
844 			return -ENOMEM;
845 		}
846 	}
847 
848 	assert(seq->ch == accel_ch);
849 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
850 	if (spdk_unlikely(task == NULL)) {
851 		if (*pseq == NULL) {
852 			accel_sequence_put(seq);
853 		}
854 
855 		return -ENOMEM;
856 	}
857 
858 	task->dst_domain = dst_domain;
859 	task->dst_domain_ctx = dst_domain_ctx;
860 	task->d.iovs = dst_iovs;
861 	task->d.iovcnt = dst_iovcnt;
862 	task->src_domain = src_domain;
863 	task->src_domain_ctx = src_domain_ctx;
864 	task->s.iovs = src_iovs;
865 	task->s.iovcnt = src_iovcnt;
866 	task->flags = flags;
867 	task->op_code = ACCEL_OPC_COPY;
868 
869 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
870 	*pseq = seq;
871 
872 	return 0;
873 }
874 
875 int
876 spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
877 		       void *buf, uint64_t len,
878 		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
879 		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
880 {
881 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
882 	struct spdk_accel_task *task;
883 	struct spdk_accel_sequence *seq = *pseq;
884 
885 	if (seq == NULL) {
886 		seq = accel_sequence_get(accel_ch);
887 		if (spdk_unlikely(seq == NULL)) {
888 			return -ENOMEM;
889 		}
890 	}
891 
892 	assert(seq->ch == accel_ch);
893 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
894 	if (spdk_unlikely(task == NULL)) {
895 		if (*pseq == NULL) {
896 			accel_sequence_put(seq);
897 		}
898 
899 		return -ENOMEM;
900 	}
901 
902 	memset(&task->fill_pattern, pattern, sizeof(uint64_t));
903 
904 	task->d.iovs = &task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
905 	task->d.iovs[0].iov_base = buf;
906 	task->d.iovs[0].iov_len = len;
907 	task->d.iovcnt = 1;
908 	task->src_domain = NULL;
909 	task->dst_domain = domain;
910 	task->dst_domain_ctx = domain_ctx;
911 	task->flags = flags;
912 	task->op_code = ACCEL_OPC_FILL;
913 
914 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
915 	*pseq = seq;
916 
917 	return 0;
918 }
919 
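/* Usage sketch (illustrative only): building a two-step sequence.  Passing a
 * pointer to a NULL sequence allocates a new one; subsequent appends attach
 * to it.  The sequence is then assumed to be executed through the public
 * finish API (spdk_accel_sequence_finish() in spdk/accel.h):
 *
 *	struct spdk_accel_sequence *seq = NULL;
 *
 *	rc = spdk_accel_append_fill(&seq, ch, buf, len, NULL, NULL, 0xa5, 0,
 *				    NULL, NULL);
 *	...
 *	rc = spdk_accel_append_copy(&seq, ch, &dst_iov, 1, NULL, NULL,
 *				    &src_iov, 1, NULL, NULL, 0, NULL, NULL);
 */
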
920 int
921 spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
922 			     struct iovec *dst_iovs, size_t dst_iovcnt,
923 			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
924 			     struct iovec *src_iovs, size_t src_iovcnt,
925 			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
926 			     int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
927 {
928 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
929 	struct spdk_accel_task *task;
930 	struct spdk_accel_sequence *seq = *pseq;
931 
932 	if (seq == NULL) {
933 		seq = accel_sequence_get(accel_ch);
934 		if (spdk_unlikely(seq == NULL)) {
935 			return -ENOMEM;
936 		}
937 	}
938 
939 	assert(seq->ch == accel_ch);
940 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
941 	if (spdk_unlikely(task == NULL)) {
942 		if (*pseq == NULL) {
943 			accel_sequence_put(seq);
944 		}
945 
946 		return -ENOMEM;
947 	}
948 
949 	/* TODO: support output_size for chaining */
950 	task->output_size = NULL;
951 	task->dst_domain = dst_domain;
952 	task->dst_domain_ctx = dst_domain_ctx;
953 	task->d.iovs = dst_iovs;
954 	task->d.iovcnt = dst_iovcnt;
955 	task->src_domain = src_domain;
956 	task->src_domain_ctx = src_domain_ctx;
957 	task->s.iovs = src_iovs;
958 	task->s.iovcnt = src_iovcnt;
959 	task->flags = flags;
960 	task->op_code = ACCEL_OPC_DECOMPRESS;
961 
962 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
963 	*pseq = seq;
964 
965 	return 0;
966 }
967 
968 int
969 spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
970 			  struct spdk_accel_crypto_key *key,
971 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
972 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
973 			  struct iovec *src_iovs, uint32_t src_iovcnt,
974 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
975 			  uint64_t iv, uint32_t block_size, int flags,
976 			  spdk_accel_step_cb cb_fn, void *cb_arg)
977 {
978 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
979 	struct spdk_accel_task *task;
980 	struct spdk_accel_sequence *seq = *pseq;
981 
982 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key ||
983 			  !block_size)) {
984 		return -EINVAL;
985 	}
986 
987 	if (seq == NULL) {
988 		seq = accel_sequence_get(accel_ch);
989 		if (spdk_unlikely(seq == NULL)) {
990 			return -ENOMEM;
991 		}
992 	}
993 
994 	assert(seq->ch == accel_ch);
995 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
996 	if (spdk_unlikely(task == NULL)) {
997 		if (*pseq == NULL) {
998 			accel_sequence_put(seq);
999 		}
1000 
1001 		return -ENOMEM;
1002 	}
1003 
1004 	task->crypto_key = key;
1005 	task->src_domain = src_domain;
1006 	task->src_domain_ctx = src_domain_ctx;
1007 	task->s.iovs = src_iovs;
1008 	task->s.iovcnt = src_iovcnt;
1009 	task->dst_domain = dst_domain;
1010 	task->dst_domain_ctx = dst_domain_ctx;
1011 	task->d.iovs = dst_iovs;
1012 	task->d.iovcnt = dst_iovcnt;
1013 	task->iv = iv;
1014 	task->block_size = block_size;
1015 	task->flags = flags;
1016 	task->op_code = ACCEL_OPC_ENCRYPT;
1017 
1018 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1019 	*pseq = seq;
1020 
1021 	return 0;
1022 }
1023 
1024 int
1025 spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1026 			  struct spdk_accel_crypto_key *key,
1027 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
1028 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1029 			  struct iovec *src_iovs, uint32_t src_iovcnt,
1030 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1031 			  uint64_t iv, uint32_t block_size, int flags,
1032 			  spdk_accel_step_cb cb_fn, void *cb_arg)
1033 {
1034 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1035 	struct spdk_accel_task *task;
1036 	struct spdk_accel_sequence *seq = *pseq;
1037 
1038 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key ||
1039 			  !block_size)) {
1040 		return -EINVAL;
1041 	}
1042 
1043 	if (seq == NULL) {
1044 		seq = accel_sequence_get(accel_ch);
1045 		if (spdk_unlikely(seq == NULL)) {
1046 			return -ENOMEM;
1047 		}
1048 	}
1049 
1050 	assert(seq->ch == accel_ch);
1051 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1052 	if (spdk_unlikely(task == NULL)) {
1053 		if (*pseq == NULL) {
1054 			accel_sequence_put(seq);
1055 		}
1056 
1057 		return -ENOMEM;
1058 	}
1059 
1060 	task->crypto_key = key;
1061 	task->src_domain = src_domain;
1062 	task->src_domain_ctx = src_domain_ctx;
1063 	task->s.iovs = src_iovs;
1064 	task->s.iovcnt = src_iovcnt;
1065 	task->dst_domain = dst_domain;
1066 	task->dst_domain_ctx = dst_domain_ctx;
1067 	task->d.iovs = dst_iovs;
1068 	task->d.iovcnt = dst_iovcnt;
1069 	task->iv = iv;
1070 	task->block_size = block_size;
1071 	task->flags = flags;
1072 	task->op_code = ACCEL_OPC_DECRYPT;
1073 
1074 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1075 	*pseq = seq;
1076 
1077 	return 0;
1078 }
1079 
1080 int
1081 spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
1082 		   struct spdk_memory_domain **domain, void **domain_ctx)
1083 {
1084 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1085 	struct accel_buffer *accel_buf;
1086 
1087 	accel_buf = accel_get_buf(accel_ch, len);
1088 	if (spdk_unlikely(accel_buf == NULL)) {
1089 		return -ENOMEM;
1090 	}
1091 
1092 	/* We always return the same pointer and identify the buffers through domain_ctx */
1093 	*buf = ACCEL_BUFFER_BASE;
1094 	*domain_ctx = accel_buf;
1095 	*domain = g_accel_domain;
1096 
1097 	return 0;
1098 }
1099 
1100 void
1101 spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
1102 		   struct spdk_memory_domain *domain, void *domain_ctx)
1103 {
1104 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1105 	struct accel_buffer *accel_buf = domain_ctx;
1106 
1107 	assert(domain == g_accel_domain);
1108 	assert(buf == ACCEL_BUFFER_BASE);
1109 
1110 	accel_put_buf(accel_ch, accel_buf);
1111 }
1112 
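/* Usage sketch (illustrative only): borrowing an accel-owned buffer, e.g. to
 * hold intermediate data between two operations in a sequence.  The returned
 * pointer is virtual (ACCEL_BUFFER_BASE); the actual memory is allocated
 * lazily when the sequence executes:
 *
 *	void *buf;
 *	struct spdk_memory_domain *domain;
 *	void *domain_ctx;
 *
 *	rc = spdk_accel_get_buf(ch, 4096, &buf, &domain, &domain_ctx);
 *	...
 *	spdk_accel_put_buf(ch, buf, domain, domain_ctx);
 */
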
1113 static void
1114 accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
1115 {
1116 	struct spdk_accel_task *task;
1117 	struct accel_io_channel *ch = seq->ch;
1118 	spdk_accel_step_cb cb_fn;
1119 	void *cb_arg;
1120 
1121 	while (!TAILQ_EMPTY(&seq->completed)) {
1122 		task = TAILQ_FIRST(&seq->completed);
1123 		TAILQ_REMOVE(&seq->completed, task, seq_link);
1124 		cb_fn = task->step_cb_fn;
1125 		cb_arg = task->step_cb_arg;
1126 		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
1127 		if (cb_fn != NULL) {
1128 			cb_fn(cb_arg);
1129 		}
1130 	}
1131 
1132 	while (!TAILQ_EMPTY(&seq->tasks)) {
1133 		task = TAILQ_FIRST(&seq->tasks);
1134 		TAILQ_REMOVE(&seq->tasks, task, seq_link);
1135 		cb_fn = task->step_cb_fn;
1136 		cb_arg = task->step_cb_arg;
1137 		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
1138 		if (cb_fn != NULL) {
1139 			cb_fn(cb_arg);
1140 		}
1141 	}
1142 }
1143 
1144 static void
1145 accel_sequence_complete(struct spdk_accel_sequence *seq)
1146 {
1147 	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, seq->status);
1148 
1149 	/* First notify all users that have appended operations to this sequence */
1150 	accel_sequence_complete_tasks(seq);
1151 
1152 	/* Then notify the user that finished the sequence */
1153 	seq->cb_fn(seq->cb_arg, seq->status);
1154 
1155 	accel_sequence_put(seq);
1156 }
1157 
1158 static void
1159 accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
1160 {
1161 	uintptr_t offset;
1162 
1163 	offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
1164 	assert(offset < accel_buf->len);
1165 
1166 	diov->iov_base = (char *)accel_buf->buf + offset;
1167 	diov->iov_len = siov->iov_len;
1168 }
1169 
1170 static void
1171 accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
1172 {
1173 	struct spdk_accel_task *task;
1174 	struct iovec *iov;
1175 
1176 	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
1177 	 * in a sequence that were using it.
1178 	 */
1179 	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
1180 		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
1181 			iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
1182 			assert(task->s.iovcnt == 1);
1183 			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
1184 			task->src_domain = NULL;
1185 			task->s.iovs = iov;
1186 		}
1187 		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
1188 			iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
1189 			assert(task->d.iovcnt == 1);
1190 			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
1191 			task->dst_domain = NULL;
1192 			task->d.iovs = iov;
1193 		}
1194 	}
1195 }
1196 
1197 static void accel_process_sequence(struct spdk_accel_sequence *seq);
1198 
1199 static void
1200 accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
1201 {
1202 	struct accel_buffer *accel_buf;
1203 
1204 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1205 
1206 	assert(accel_buf->seq != NULL);
1207 	assert(accel_buf->buf == NULL);
1208 	accel_buf->buf = buf;
1209 
1210 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1211 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
1212 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1213 	accel_process_sequence(accel_buf->seq);
1214 }
1215 
1216 static bool
1217 accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
1218 			 spdk_iobuf_get_cb cb_fn)
1219 {
1220 	struct accel_io_channel *ch = seq->ch;
1221 
1222 	assert(buf->buf == NULL);
1223 	assert(buf->seq == NULL);
1224 
1225 	buf->seq = seq;
1226 	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
1227 	if (buf->buf == NULL) {
1228 		return false;
1229 	}
1230 
1231 	return true;
1232 }
1233 
1234 static bool
1235 accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1236 {
1237 	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
1238 	 * NULL */
1239 	if (task->src_domain == g_accel_domain) {
1240 		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
1241 					      accel_iobuf_get_virtbuf_cb)) {
1242 			return false;
1243 		}
1244 
1245 		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
1246 	}
1247 
1248 	if (task->dst_domain == g_accel_domain) {
1249 		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
1250 					      accel_iobuf_get_virtbuf_cb)) {
1251 			return false;
1252 		}
1253 
1254 		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
1255 	}
1256 
1257 	return true;
1258 }
1259 
1260 static void
1261 accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
1262 {
1263 	struct accel_buffer *accel_buf;
1264 
1265 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1266 
1267 	assert(accel_buf->seq != NULL);
1268 	assert(accel_buf->buf == NULL);
1269 	accel_buf->buf = buf;
1270 
1271 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1272 	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
1273 }
1274 
1275 bool
1276 spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
1277 			      struct spdk_memory_domain *domain, void *domain_ctx,
1278 			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
1279 {
1280 	struct accel_buffer *accel_buf = domain_ctx;
1281 
1282 	assert(domain == g_accel_domain);
1283 	accel_buf->cb_fn = cb_fn;
1284 	accel_buf->cb_ctx = cb_ctx;
1285 
1286 	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
1287 		return false;
1288 	}
1289 
1290 	accel_sequence_set_virtbuf(seq, accel_buf);
1291 
1292 	return true;
1293 }
1294 
1295 struct spdk_accel_task *
1296 spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
1297 {
1298 	return TAILQ_FIRST(&seq->tasks);
1299 }
1300 
1301 struct spdk_accel_task *
1302 spdk_accel_sequence_next_task(struct spdk_accel_task *task)
1303 {
1304 	return TAILQ_NEXT(task, seq_link);
1305 }
1306 
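/* Usage sketch (illustrative only): a platform driver walking the tasks of a
 * sequence from its execute_sequence() callback.  Once the driver has
 * processed as much of the sequence as it can, it resumes the framework's
 * state machine with spdk_accel_sequence_continue():
 *
 *	struct spdk_accel_task *task;
 *
 *	for (task = spdk_accel_sequence_first_task(seq); task != NULL;
 *	     task = spdk_accel_sequence_next_task(task)) {
 *		// inspect task->op_code and submit it to the hardware
 *	}
 */
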
1307 static inline uint64_t
1308 accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
1309 {
1310 	uint64_t result = 0;
1311 	uint32_t i;
1312 
1313 	for (i = 0; i < iovcnt; ++i) {
1314 		result += iovs[i].iov_len;
1315 	}
1316 
1317 	return result;
1318 }
1319 
1320 static inline void
1321 accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
1322 			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
1323 			struct accel_buffer *buf)
1324 {
1325 	bounce->orig_iovs = *iovs;
1326 	bounce->orig_iovcnt = *iovcnt;
1327 	bounce->orig_domain = *domain;
1328 	bounce->orig_domain_ctx = *domain_ctx;
1329 	bounce->iov.iov_base = buf->buf;
1330 	bounce->iov.iov_len = buf->len;
1331 
1332 	*iovs = &bounce->iov;
1333 	*iovcnt = 1;
1334 	*domain = NULL;
1335 }
1336 
1337 static void
1338 accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1339 {
1340 	struct spdk_accel_task *task;
1341 	struct accel_buffer *accel_buf;
1342 
1343 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1344 	assert(accel_buf->buf == NULL);
1345 	accel_buf->buf = buf;
1346 
1347 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
1348 	assert(task != NULL);
1349 
1350 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1351 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1352 	accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
1353 				&task->src_domain_ctx, accel_buf);
1354 	accel_process_sequence(accel_buf->seq);
1355 }
1356 
1357 static void
1358 accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1359 {
1360 	struct spdk_accel_task *task;
1361 	struct accel_buffer *accel_buf;
1362 
1363 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1364 	assert(accel_buf->buf == NULL);
1365 	accel_buf->buf = buf;
1366 
1367 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
1368 	assert(task != NULL);
1369 
1370 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1371 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1372 	accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
1373 				&task->dst_domain_ctx, accel_buf);
1374 	accel_process_sequence(accel_buf->seq);
1375 }
1376 
1377 static int
1378 accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1379 {
1380 	struct accel_buffer *buf;
1381 
1382 	if (task->src_domain != NULL) {
1383 		/* By the time we're here, accel buffers should have been allocated */
1384 		assert(task->src_domain != g_accel_domain);
1385 
1386 		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
1387 		if (buf == NULL) {
1388 			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
1389 			return -ENOMEM;
1390 		}
1391 
1392 		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
1393 		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
1394 			return -EAGAIN;
1395 		}
1396 
1397 		accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt,
1398 					&task->src_domain, &task->src_domain_ctx, buf);
1399 	}
1400 
1401 	if (task->dst_domain != NULL) {
1402 		/* By the time we're here, accel buffers should have been allocated */
1403 		assert(task->dst_domain != g_accel_domain);
1404 
1405 		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
1406 		if (buf == NULL) {
1407 			/* The src buffer will be released when a sequence is completed */
1408 			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
1409 			return -ENOMEM;
1410 		}
1411 
1412 		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
1413 		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
1414 			return -EAGAIN;
1415 		}
1416 
1417 		accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt,
1418 					&task->dst_domain, &task->dst_domain_ctx, buf);
1419 	}
1420 
1421 	return 0;
1422 }
1423 
1424 static void
1425 accel_task_pull_data_cb(void *ctx, int status)
1426 {
1427 	struct spdk_accel_sequence *seq = ctx;
1428 
1429 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1430 	if (spdk_likely(status == 0)) {
1431 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1432 	} else {
1433 		accel_sequence_set_fail(seq, status);
1434 	}
1435 
1436 	accel_process_sequence(seq);
1437 }
1438 
1439 static void
1440 accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1441 {
1442 	int rc;
1443 
1444 	assert(task->bounce.s.orig_iovs != NULL);
1445 	assert(task->bounce.s.orig_domain != NULL);
1446 	assert(task->bounce.s.orig_domain != g_accel_domain);
1447 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
1448 
1449 	rc = spdk_memory_domain_pull_data(task->bounce.s.orig_domain,
1450 					  task->bounce.s.orig_domain_ctx,
1451 					  task->bounce.s.orig_iovs, task->bounce.s.orig_iovcnt,
1452 					  task->s.iovs, task->s.iovcnt,
1453 					  accel_task_pull_data_cb, seq);
1454 	if (spdk_unlikely(rc != 0)) {
1455 		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
1456 			    spdk_memory_domain_get_dma_device_id(task->bounce.s.orig_domain), rc);
1457 		accel_sequence_set_fail(seq, rc);
1458 	}
1459 }
1460 
1461 static void
1462 accel_task_push_data_cb(void *ctx, int status)
1463 {
1464 	struct spdk_accel_sequence *seq = ctx;
1465 
1466 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
1467 	if (spdk_likely(status == 0)) {
1468 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
1469 	} else {
1470 		accel_sequence_set_fail(seq, status);
1471 	}
1472 
1473 	accel_process_sequence(seq);
1474 }
1475 
1476 static void
1477 accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1478 {
1479 	int rc;
1480 
1481 	assert(task->bounce.d.orig_iovs != NULL);
1482 	assert(task->bounce.d.orig_domain != NULL);
1483 	assert(task->bounce.d.orig_domain != g_accel_domain);
1484 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
1485 
1486 	rc = spdk_memory_domain_push_data(task->bounce.d.orig_domain,
1487 					  task->bounce.d.orig_domain_ctx,
1488 					  task->bounce.d.orig_iovs, task->bounce.d.orig_iovcnt,
1489 					  task->d.iovs, task->d.iovcnt,
1490 					  accel_task_push_data_cb, seq);
1491 	if (spdk_unlikely(rc != 0)) {
1492 		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
1493 			    spdk_memory_domain_get_dma_device_id(task->bounce.d.orig_domain), rc);
1494 		accel_sequence_set_fail(seq, rc);
1495 	}
1496 }
1497 
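/* Main state machine for executing a sequence.  For a task executed by a
 * module, the typical flow (ignoring the driver path and errors) is:
 *
 *   INIT -> CHECK_VIRTBUF -> CHECK_BOUNCEBUF [-> PULL_DATA] -> EXEC_TASK
 *        -> AWAIT_TASK -> COMPLETE_TASK [-> PUSH_DATA] -> NEXT_TASK
 *
 * and then back to INIT for the next task.  The AWAIT_* states simply wait
 * for an asynchronous callback (buffer allocation, data transfer, or task
 * completion) to drive the sequence forward again.
 */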
1498 static void
1499 accel_process_sequence(struct spdk_accel_sequence *seq)
1500 {
1501 	struct accel_io_channel *accel_ch = seq->ch;
1502 	struct spdk_accel_module_if *module;
1503 	struct spdk_io_channel *module_ch;
1504 	struct spdk_accel_task *task;
1505 	enum accel_sequence_state state;
1506 	int rc;
1507 
1508 	/* Prevent recursive calls to this function */
1509 	if (spdk_unlikely(seq->in_process_sequence)) {
1510 		return;
1511 	}
1512 	seq->in_process_sequence = true;
1513 
1514 	task = TAILQ_FIRST(&seq->tasks);
1515 	assert(task != NULL);
1516 
1517 	do {
1518 		state = seq->state;
1519 		switch (state) {
1520 		case ACCEL_SEQUENCE_STATE_INIT:
1521 			if (g_accel_driver != NULL) {
1522 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC);
1523 				break;
1524 			}
1525 		/* Fall through */
1526 		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
1527 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1528 			if (!accel_sequence_check_virtbuf(seq, task)) {
1529 				/* We couldn't allocate a buffer, wait until one is available */
1530 				break;
1531 			}
1532 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1533 		/* Fall through */
1534 		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
1535 			/* If a module supports memory domains, we don't need to allocate bounce
1536 			 * buffers */
1537 			if (g_modules_opc[task->op_code].supports_memory_domains) {
1538 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1539 				break;
1540 			}
1541 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1542 			rc = accel_sequence_check_bouncebuf(seq, task);
1543 			if (rc != 0) {
1544 				/* We couldn't allocate a buffer, wait until one is available */
1545 				if (rc == -EAGAIN) {
1546 					break;
1547 				}
1548 				accel_sequence_set_fail(seq, rc);
1549 				break;
1550 			}
1551 			if (task->bounce.s.orig_iovs != NULL) {
1552 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
1553 				break;
1554 			}
1555 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1556 		/* Fall through */
1557 		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
1558 			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
1559 				      g_opcode_strings[task->op_code], seq);
1560 
1561 			module = g_modules_opc[task->op_code].module;
1562 			module_ch = accel_ch->module_ch[task->op_code];
1563 
1564 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
1565 			rc = module->submit_tasks(module_ch, task);
1566 			if (spdk_unlikely(rc != 0)) {
1567 				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
1568 					    g_opcode_strings[task->op_code], seq);
1569 				accel_sequence_set_fail(seq, rc);
1570 			}
1571 			break;
1572 		case ACCEL_SEQUENCE_STATE_PULL_DATA:
1573 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1574 			accel_task_pull_data(seq, task);
1575 			break;
1576 		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
1577 			if (task->bounce.d.orig_iovs != NULL) {
1578 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
1579 				break;
1580 			}
1581 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
1582 			break;
1583 		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
1584 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
1585 			accel_task_push_data(seq, task);
1586 			break;
1587 		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
1588 			TAILQ_REMOVE(&seq->tasks, task, seq_link);
1589 			TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
1590 			/* Check if there are any remaining tasks */
1591 			task = TAILQ_FIRST(&seq->tasks);
1592 			if (task == NULL) {
1593 				/* Immediately return here to make sure we don't touch the sequence
1594 				 * after it's completed */
1595 				accel_sequence_complete(seq);
1596 				return;
1597 			}
1598 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
1599 			break;
1600 		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC:
1601 			assert(!TAILQ_EMPTY(&seq->tasks));
1602 
1603 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK);
1604 			rc = g_accel_driver->execute_sequence(seq);
1605 			if (spdk_unlikely(rc != 0)) {
1606 				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
1607 					    seq, g_accel_driver->name);
1608 				accel_sequence_set_fail(seq, rc);
1609 			}
1610 			break;
1611 		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE:
1612 			task = TAILQ_FIRST(&seq->tasks);
1613 			if (task == NULL) {
1614 				/* Immediately return here to make sure we don't touch the sequence
1615 				 * after it's completed */
1616 				accel_sequence_complete(seq);
1617 				return;
1618 			}
1619 			/* We don't want to execute the next task through the driver, so we
1620 			 * explicitly omit the INIT state here */
1621 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
1622 			break;
1623 		case ACCEL_SEQUENCE_STATE_ERROR:
1624 			/* Immediately return here to make sure we don't touch the sequence
1625 			 * after it's completed */
1626 			assert(seq->status != 0);
1627 			accel_sequence_complete(seq);
1628 			return;
1629 		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
1630 		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
1631 		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
1632 		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
1633 		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
1634 		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK:
1635 			break;
1636 		default:
1637 			assert(0 && "bad state");
1638 			break;
1639 		}
1640 	} while (seq->state != state);
1641 
1642 	seq->in_process_sequence = false;
1643 }
1644 
1645 static void
1646 accel_sequence_task_cb(void *cb_arg, int status)
1647 {
1648 	struct spdk_accel_sequence *seq = cb_arg;
1649 	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);
1650 	struct accel_io_channel *accel_ch = seq->ch;
1651 
1652 	/* spdk_accel_task_complete() puts the task back to the task pool, but we don't want to do
1653 	 * that if a task is part of a sequence.  Removing the task from that pool here is the
1654 	 * easiest way to prevent this, even though it is a bit hacky.
1655 	 */
1656 	assert(task != NULL);
1657 	TAILQ_REMOVE(&accel_ch->task_pool, task, link);
1658 
1659 	switch (seq->state) {
1660 	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
1661 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
1662 		if (spdk_unlikely(status != 0)) {
1663 			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
1664 				    g_opcode_strings[task->op_code], seq);
1665 			accel_sequence_set_fail(seq, status);
1666 		}
1667 
1668 		accel_process_sequence(seq);
1669 		break;
1670 	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK:
1671 		assert(g_accel_driver != NULL);
1672 		/* Immediately remove the task from the outstanding list to make sure the next call
1673 		 * to spdk_accel_sequence_first_task() doesn't return it */
1674 		TAILQ_REMOVE(&seq->tasks, task, seq_link);
1675 		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
1676 
1677 		if (spdk_unlikely(status != 0)) {
1678 			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
1679 				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
1680 				    g_accel_driver->name);
1681 			/* Update status without using accel_sequence_set_fail() to avoid changing
1682 			 * seq's state to ERROR until driver calls spdk_accel_sequence_continue() */
1683 			seq->status = status;
1684 		}
1685 		break;
1686 	default:
1687 		assert(0 && "bad state");
1688 		break;
1689 	}
1690 }
1691 
1692 void
1693 spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
1694 {
1695 	assert(g_accel_driver != NULL);
1696 	assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK);
1697 
1698 	if (spdk_likely(seq->status == 0)) {
1699 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE);
1700 	} else {
1701 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
1702 	}
1703 
1704 	accel_process_sequence(seq);
1705 }
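/*
 * A minimal sketch (not taken from any real driver) of how a platform driver's
 * execute_sequence() callback might interact with spdk_accel_sequence_continue().
 * The example_* names are hypothetical; only spdk_accel_sequence_first_task()
 * and spdk_accel_sequence_continue() are part of the actual API.
 *
 *	static int
 *	example_execute_sequence(struct spdk_accel_sequence *seq)
 *	{
 *		struct spdk_accel_task *task = spdk_accel_sequence_first_task(seq);
 *
 *		// Submit as many tasks as the hardware can handle; each one is
 *		// completed through the framework (see accel_sequence_task_cb()).
 *		// Once the driver is done with its part of the sequence, it
 *		// resumes processing of the remaining tasks:
 *		spdk_accel_sequence_continue(seq);
 *		return 0;
 *	}
 */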
1706 
1707 static bool
1708 accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
1709 {
1710 	/* For now, just do a naive check that the iovec arrays are exactly the same */
1711 	if (iovacnt != iovbcnt) {
1712 		return false;
1713 	}
1714 
1715 	return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
1716 }
1717 
1718 static void
1719 accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
1720 			   struct spdk_accel_task **next_task)
1721 {
1722 	struct spdk_accel_task *next = *next_task;
1723 
1724 	switch (task->op_code) {
1725 	case ACCEL_OPC_COPY:
1726 		/* We only allow changing the src of operations that actually have a src, e.g. we
1727 		 * never do it for fill.  Theoretically, it is possible, but we'd have to be careful
1728 		 * to change the src of the operation following the fill (which in turn could also
1729 		 * be a fill).  So, for the sake of simplicity, skip these types of operations for now.
1730 		 */
1731 		if (next->op_code != ACCEL_OPC_DECOMPRESS &&
1732 		    next->op_code != ACCEL_OPC_COPY &&
1733 		    next->op_code != ACCEL_OPC_ENCRYPT &&
1734 		    next->op_code != ACCEL_OPC_DECRYPT) {
1735 			break;
1736 		}
1737 		if (task->dst_domain != next->src_domain) {
1738 			break;
1739 		}
1740 		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
1741 					next->s.iovs, next->s.iovcnt)) {
1742 			break;
1743 		}
1744 		next->s.iovs = task->s.iovs;
1745 		next->s.iovcnt = task->s.iovcnt;
1746 		next->src_domain = task->src_domain;
1747 		TAILQ_REMOVE(&seq->tasks, task, seq_link);
1748 		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
1749 		break;
1750 	case ACCEL_OPC_DECOMPRESS:
1751 	case ACCEL_OPC_FILL:
1752 	case ACCEL_OPC_ENCRYPT:
1753 	case ACCEL_OPC_DECRYPT:
1754 		/* We can only merge tasks when one of them is a copy */
1755 		if (next->op_code != ACCEL_OPC_COPY) {
1756 			break;
1757 		}
1758 		if (task->dst_domain != next->src_domain) {
1759 			break;
1760 		}
1761 		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
1762 					next->s.iovs, next->s.iovcnt)) {
1763 			break;
1764 		}
1765 		task->d.iovs = next->d.iovs;
1766 		task->d.iovcnt = next->d.iovcnt;
1767 		task->dst_domain = next->dst_domain;
1768 		/* We're removing next_task from the tasks queue, so we need to update its pointer
1769 		 * to keep the TAILQ_FOREACH_SAFE() loop in spdk_accel_sequence_finish() valid */
1770 		*next_task = TAILQ_NEXT(next, seq_link);
1771 		TAILQ_REMOVE(&seq->tasks, next, seq_link);
1772 		TAILQ_INSERT_TAIL(&seq->completed, next, seq_link);
1773 		break;
1774 	default:
1775 		assert(0 && "bad opcode");
1776 		break;
1777 	}
1778 }
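/*
 * For illustration, given a sequence like:
 *
 *	decompress: src -> tmp
 *	copy:       tmp -> dst
 *
 * the DECOMPRESS/FILL/ENCRYPT/DECRYPT branch above rewrites the decompress to
 * write directly to dst and moves the now-redundant copy to the completed list.
 * The COPY branch handles the mirrored case, where a copy's destination feeds
 * the src of the operation that follows it.
 */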
1779 
1780 int
1781 spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
1782 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
1783 {
1784 	struct spdk_accel_task *task, *next;
1785 
1786 	/* Try to merge adjacent tasks to eliminate any copy operations where possible */
1787 	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
1788 		if (next == NULL) {
1789 			break;
1790 		}
1791 		accel_sequence_merge_tasks(seq, task, &next);
1792 	}
1793 
1794 	seq->cb_fn = cb_fn;
1795 	seq->cb_arg = cb_arg;
1796 
1797 	accel_process_sequence(seq);
1798 
1799 	return 0;
1800 }
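/*
 * A minimal usage sketch, assuming a sequence was previously built up with the
 * spdk_accel_append_*() family of functions (example_seq_done and ctx are
 * hypothetical):
 *
 *	static void
 *	example_seq_done(void *cb_arg, int status)
 *	{
 *		// Called once all tasks have completed; status != 0 on failure.
 *	}
 *
 *	...
 *	rc = spdk_accel_sequence_finish(seq, example_seq_done, &ctx);
 */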
1801 
1802 void
1803 spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
1804 {
1805 	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
1806 	struct spdk_accel_task *task;
1807 
1808 	assert(TAILQ_EMPTY(&seq->completed));
1809 	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);
1810 
1811 	while (!TAILQ_EMPTY(&tasks)) {
1812 		task = TAILQ_FIRST(&tasks);
1813 		TAILQ_REMOVE(&tasks, task, seq_link);
1814 		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
1815 	}
1816 }
1817 
1818 void
1819 spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
1820 {
1821 	if (seq == NULL) {
1822 		return;
1823 	}
1824 
1825 	accel_sequence_complete_tasks(seq);
1826 	accel_sequence_put(seq);
1827 }
1828 
1829 struct spdk_memory_domain *
1830 spdk_accel_get_memory_domain(void)
1831 {
1832 	return g_accel_domain;
1833 }
1834 
1835 static struct spdk_accel_module_if *
1836 _module_find_by_name(const char *name)
1837 {
1838 	struct spdk_accel_module_if *accel_module = NULL;
1839 
1840 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
1841 		if (strcmp(name, accel_module->name) == 0) {
1842 			break;
1843 		}
1844 	}
1845 
1846 	return accel_module;
1847 }
1848 
1849 static inline struct spdk_accel_crypto_key *
1850 _accel_crypto_key_get(const char *name)
1851 {
1852 	struct spdk_accel_crypto_key *key;
1853 
1854 	assert(spdk_spin_held(&g_keyring_spin));
1855 
1856 	TAILQ_FOREACH(key, &g_keyring, link) {
1857 		if (strcmp(name, key->param.key_name) == 0) {
1858 			return key;
1859 		}
1860 	}
1861 
1862 	return NULL;
1863 }
1864 
1865 static void
1866 accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
1867 {
1868 	if (key->param.hex_key) {
1869 		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
1870 		free(key->param.hex_key);
1871 	}
1872 	if (key->param.hex_key2) {
1873 		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
1874 		free(key->param.hex_key2);
1875 	}
1876 	free(key->param.key_name);
1877 	free(key->param.cipher);
1878 	if (key->key) {
1879 		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
1880 		free(key->key);
1881 	}
1882 	if (key->key2) {
1883 		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
1884 		free(key->key2);
1885 	}
1886 	free(key);
1887 }
1888 
1889 static void
1890 accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
1891 {
1892 	assert(key->module_if);
1893 	assert(key->module_if->crypto_key_deinit);
1894 
1895 	key->module_if->crypto_key_deinit(key);
1896 	accel_crypto_key_free_mem(key);
1897 }
1898 
1899 /*
1900  * This function mitigates a timing side channel which could be caused by using strcmp()
1901  * Please refer to chapter "Mitigating Information Leakage Based on Variable Timing" in
1902  * the article [1] for more details
1903  * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
1904  */
1905 static bool
1906 accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
1907 {
1908 	size_t i;
1909 	volatile size_t x = k1_len ^ k2_len;
1910 
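	/* Note: the bitwise '&' below (rather than '&&') is deliberate, so that the
	 * bounds check does not short-circuit and introduce a data-dependent branch */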
1911 	for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
1912 		x |= k1[i] ^ k2[i];
1913 	}
1914 
1915 	return x == 0;
1916 }
1917 
1918 int
1919 spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
1920 {
1921 	struct spdk_accel_module_if *module;
1922 	struct spdk_accel_crypto_key *key;
1923 	size_t hex_key_size, hex_key2_size;
1924 	int rc;
1925 
1926 	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
1927 		return -EINVAL;
1928 	}
1929 
1930 	if (g_modules_opc[ACCEL_OPC_ENCRYPT].module != g_modules_opc[ACCEL_OPC_DECRYPT].module) {
1931 		/* This should hardly ever happen, but check and warn the user just in case */
1932 		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
1933 	}
1934 	module = g_modules_opc[ACCEL_OPC_ENCRYPT].module;
1935 
1936 	if (!module) {
1937 		SPDK_ERRLOG("No accel module found assigned for crypto operation\n");
1938 		return -ENOENT;
1939 	}
1940 	if (!module->crypto_key_init) {
1941 		SPDK_ERRLOG("Accel module \"%s\" doesn't support crypto operations\n", module->name);
1942 		return -ENOTSUP;
1943 	}
1944 
1945 	key = calloc(1, sizeof(*key));
1946 	if (!key) {
1947 		return -ENOMEM;
1948 	}
1949 
1950 	key->param.key_name = strdup(param->key_name);
1951 	if (!key->param.key_name) {
1952 		rc = -ENOMEM;
1953 		goto error;
1954 	}
1955 
1956 	key->param.cipher = strdup(param->cipher);
1957 	if (!key->param.cipher) {
1958 		rc = -ENOMEM;
1959 		goto error;
1960 	}
1961 
1962 	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
1963 	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
1964 		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
1965 		rc = -EINVAL;
1966 		goto error;
1967 	}
1968 	key->param.hex_key = strdup(param->hex_key);
1969 	if (!key->param.hex_key) {
1970 		rc = -ENOMEM;
1971 		goto error;
1972 	}
1973 
1974 	key->key_size = hex_key_size / 2;
1975 	key->key = spdk_unhexlify(key->param.hex_key);
1976 	if (!key->key) {
1977 		SPDK_ERRLOG("Failed to unhexlify key1\n");
1978 		rc = -EINVAL;
1979 		goto error;
1980 	}
1981 
1982 	if (param->hex_key2) {
1983 		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
1984 		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
1985 			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
1986 			rc = -EINVAL;
1987 			goto error;
1988 		}
1989 		key->param.hex_key2 = strdup(param->hex_key2);
1990 		if (!key->param.hex_key2) {
1991 			rc = -ENOMEM;
1992 			goto error;
1993 		}
1994 
1995 		key->key2_size = hex_key2_size / 2;
1996 		key->key2 = spdk_unhexlify(key->param.hex_key2);
1997 		if (!key->key2) {
1998 			SPDK_ERRLOG("Failed to unhexlify key2\n");
1999 			rc = -EINVAL;
2000 			goto error;
2001 		}
2002 
2003 		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
2004 			SPDK_ERRLOG("Identical keys are not secure\n");
2005 			rc = -EINVAL;
2006 			goto error;
2007 		}
2008 	}
2009 
2010 	key->module_if = module;
2011 
2012 	spdk_spin_lock(&g_keyring_spin);
2013 	if (_accel_crypto_key_get(param->key_name)) {
2014 		rc = -EEXIST;
2015 	} else {
2016 		rc = module->crypto_key_init(key);
2017 		if (!rc) {
2018 			TAILQ_INSERT_TAIL(&g_keyring, key, link);
2019 		}
2020 	}
2021 	spdk_spin_unlock(&g_keyring_spin);
2022 
2023 	if (rc) {
2024 		goto error;
2025 	}
2026 
2027 	return 0;
2028 
2029 error:
2030 	accel_crypto_key_free_mem(key);
2031 	return rc;
2032 }
2033 
2034 int
2035 spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
2036 {
2037 	if (!key || !key->module_if) {
2038 		return -EINVAL;
2039 	}
2040 
2041 	spdk_spin_lock(&g_keyring_spin);
2042 	if (!_accel_crypto_key_get(key->param.key_name)) {
2043 		spdk_spin_unlock(&g_keyring_spin);
2044 		return -ENOENT;
2045 	}
2046 	TAILQ_REMOVE(&g_keyring, key, link);
2047 	spdk_spin_unlock(&g_keyring_spin);
2048 
2049 	accel_crypto_key_destroy_unsafe(key);
2050 
2051 	return 0;
2052 }
2053 
2054 struct spdk_accel_crypto_key *
2055 spdk_accel_crypto_key_get(const char *name)
2056 {
2057 	struct spdk_accel_crypto_key *key;
2058 
2059 	spdk_spin_lock(&g_keyring_spin);
2060 	key = _accel_crypto_key_get(name);
2061 	spdk_spin_unlock(&g_keyring_spin);
2062 
2063 	return key;
2064 }
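/*
 * A minimal sketch of the keyring API (the field values are hypothetical; the
 * field names mirror the ones consumed by spdk_accel_crypto_key_create()):
 *
 *	struct spdk_accel_crypto_key_create_param param = {
 *		.cipher = "AES_XTS",
 *		.hex_key = "00112233445566778899aabbccddeeff",
 *		.hex_key2 = "ffeeddccbbaa99887766554433221100",
 *		.key_name = "key0",
 *	};
 *
 *	rc = spdk_accel_crypto_key_create(&param);
 *	...
 *	key = spdk_accel_crypto_key_get("key0");
 *	...
 *	rc = spdk_accel_crypto_key_destroy(key);
 */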
2065 
2066 /* Helper function called when accel modules register with the framework. */
2067 void
2068 spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
2069 {
2070 	if (_module_find_by_name(accel_module->name)) {
2071 		SPDK_NOTICELOG("Accel module %s already registered\n", accel_module->name);
2072 		assert(false);
2073 		return;
2074 	}
2075 
2076 	/* Make sure that the software module is at the head of the list; this
2077 	 * ensures that all opcodes are first assigned to software and then
2078 	 * updated to HW modules as they are registered.
2079 	 */
2080 	if (strcmp(accel_module->name, "software") == 0) {
2081 		TAILQ_INSERT_HEAD(&spdk_accel_module_list, accel_module, tailq);
2082 	} else {
2083 		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
2084 	}
2085 
2086 	if (accel_module->get_ctx_size && accel_module->get_ctx_size() > g_max_accel_module_size) {
2087 		g_max_accel_module_size = accel_module->get_ctx_size();
2088 	}
2089 }
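/*
 * A schematic module definition (all example_* names are hypothetical).  Modules
 * normally register from a constructor, e.g. via SPDK_ACCEL_MODULE_REGISTER()
 * from spdk_internal/accel_module.h, which calls spdk_accel_module_list_add():
 *
 *	static struct spdk_accel_module_if g_example_module = {
 *		.module_init		= example_init,
 *		.module_fini		= example_fini,
 *		.get_ctx_size		= example_get_ctx_size,
 *		.name			= "example",
 *		.supports_opcode	= example_supports_opcode,
 *		.get_io_channel		= example_get_io_channel,
 *	};
 *
 *	SPDK_ACCEL_MODULE_REGISTER(example, &g_example_module)
 */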
2090 
2091 /* Framework level channel create callback. */
2092 static int
2093 accel_create_channel(void *io_device, void *ctx_buf)
2094 {
2095 	struct accel_io_channel	*accel_ch = ctx_buf;
2096 	struct spdk_accel_task *accel_task;
2097 	struct spdk_accel_sequence *seq;
2098 	struct accel_buffer *buf;
2099 	uint8_t *task_mem;
2100 	int i = 0, j, rc;
2101 
2102 	accel_ch->task_pool_base = calloc(MAX_TASKS_PER_CHANNEL, g_max_accel_module_size);
2103 	if (accel_ch->task_pool_base == NULL) {
2104 		return -ENOMEM;
2105 	}
2106 
2107 	accel_ch->seq_pool_base = calloc(MAX_TASKS_PER_CHANNEL, sizeof(struct spdk_accel_sequence));
2108 	if (accel_ch->seq_pool_base == NULL) {
2109 		goto err;
2110 	}
2111 
2112 	accel_ch->buf_pool_base = calloc(MAX_TASKS_PER_CHANNEL, sizeof(struct accel_buffer));
2113 	if (accel_ch->buf_pool_base == NULL) {
2114 		goto err;
2115 	}
2116 
2117 	TAILQ_INIT(&accel_ch->task_pool);
2118 	TAILQ_INIT(&accel_ch->seq_pool);
2119 	TAILQ_INIT(&accel_ch->buf_pool);
2120 	task_mem = accel_ch->task_pool_base;
2121 	for (i = 0 ; i < MAX_TASKS_PER_CHANNEL; i++) {
2122 		accel_task = (struct spdk_accel_task *)task_mem;
2123 		seq = &accel_ch->seq_pool_base[i];
2124 		buf = &accel_ch->buf_pool_base[i];
2125 		TAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
2126 		TAILQ_INSERT_TAIL(&accel_ch->seq_pool, seq, link);
2127 		TAILQ_INSERT_TAIL(&accel_ch->buf_pool, buf, link);
2128 		task_mem += g_max_accel_module_size;
2129 	}
2130 
2131 	/* Assign modules and get IO channels for each */
2132 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
2133 		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
2134 		/* This can happen if idxd runs out of channels. */
2135 		if (accel_ch->module_ch[i] == NULL) {
2136 			goto err;
2137 		}
2138 	}
2139 
2140 	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", ACCEL_SMALL_CACHE_SIZE,
2141 				     ACCEL_LARGE_CACHE_SIZE);
2142 	if (rc != 0) {
2143 		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
2144 		goto err;
2145 	}
2146 
2147 	return 0;
2148 err:
2149 	for (j = 0; j < i; j++) {
2150 		spdk_put_io_channel(accel_ch->module_ch[j]);
2151 	}
2152 	free(accel_ch->task_pool_base);
2153 	free(accel_ch->seq_pool_base);
2154 	free(accel_ch->buf_pool_base);
2155 	return -ENOMEM;
2156 }
2157 
2158 /* Framework level channel destroy callback. */
2159 static void
2160 accel_destroy_channel(void *io_device, void *ctx_buf)
2161 {
2162 	struct accel_io_channel	*accel_ch = ctx_buf;
2163 	int i;
2164 
2165 	spdk_iobuf_channel_fini(&accel_ch->iobuf);
2166 
2167 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
2168 		assert(accel_ch->module_ch[i] != NULL);
2169 		spdk_put_io_channel(accel_ch->module_ch[i]);
2170 		accel_ch->module_ch[i] = NULL;
2171 	}
2172 
2173 	free(accel_ch->task_pool_base);
2174 	free(accel_ch->seq_pool_base);
2175 	free(accel_ch->buf_pool_base);
2176 }
2177 
2178 struct spdk_io_channel *
2179 spdk_accel_get_io_channel(void)
2180 {
2181 	return spdk_get_io_channel(&spdk_accel_module_list);
2182 }
2183 
2184 static void
2185 accel_module_initialize(void)
2186 {
2187 	struct spdk_accel_module_if *accel_module;
2188 
2189 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2190 		accel_module->module_init();
2191 	}
2192 }
2193 
2194 static void
2195 accel_module_init_opcode(enum accel_opcode opcode)
2196 {
2197 	struct accel_module *module = &g_modules_opc[opcode];
2198 	struct spdk_accel_module_if *module_if = module->module;
2199 
2200 	if (module_if->get_memory_domains != NULL) {
2201 		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
2202 	}
2203 }
2204 
2205 int
2206 spdk_accel_initialize(void)
2207 {
2208 	enum accel_opcode op;
2209 	struct spdk_accel_module_if *accel_module = NULL;
2210 	int rc;
2211 
2212 	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
2213 				       "SPDK_ACCEL_DMA_DEVICE");
2214 	if (rc != 0) {
2215 		SPDK_ERRLOG("Failed to create accel memory domain\n");
2216 		return rc;
2217 	}
2218 
2219 	spdk_spin_init(&g_keyring_spin);
2220 
2221 	g_modules_started = true;
2222 	accel_module_initialize();
2223 
2224 	/* Create our global priority map of opcodes to modules.  We start by
2225 	 * populating it with the software module (guaranteed to be first on the
2226 	 * list) and then update opcodes with HW modules that have been initialized.
2227 	 * NOTE: all opcodes must be supported by software in the event that no HW
2228 	 * modules are initialized to support the operation.
2229 	 */
2230 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2231 		for (op = 0; op < ACCEL_OPC_LAST; op++) {
2232 			if (accel_module->supports_opcode(op)) {
2233 				g_modules_opc[op].module = accel_module;
2234 				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
2235 			}
2236 		}
2237 	}
2238 
2239 	/* Now let's check for overrides and apply any that exist */
2240 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
2241 		if (g_modules_opc_override[op] != NULL) {
2242 			accel_module = _module_find_by_name(g_modules_opc_override[op]);
2243 			if (accel_module == NULL) {
2244 				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
2245 				rc = -EINVAL;
2246 				goto error;
2247 			}
2248 			if (accel_module->supports_opcode(op) == false) {
2249 				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
2250 				rc = -EINVAL;
2251 				goto error;
2252 			}
2253 			g_modules_opc[op].module = accel_module;
2254 		}
2255 	}
2256 
2257 	if (g_modules_opc[ACCEL_OPC_ENCRYPT].module != g_modules_opc[ACCEL_OPC_DECRYPT].module) {
2258 		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations");
2259 		rc = -EINVAL;
2260 		goto error;
2261 	}
2262 
2263 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
2264 		assert(g_modules_opc[op].module != NULL);
2265 		accel_module_init_opcode(op);
2266 	}
2267 
2268 	rc = spdk_iobuf_register_module("accel");
2269 	if (rc != 0) {
2270 		SPDK_ERRLOG("Failed to register accel iobuf module\n");
2271 		goto error;
2272 	}
2273 
2274 	/*
2275 	 * We need a unique identifier for the accel framework, so use the
2276 	 * spdk_accel_module_list address for this purpose.
2277 	 */
2278 	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
2279 				sizeof(struct accel_io_channel), "accel");
2280 
2281 	return 0;
2282 error:
2283 	spdk_memory_domain_destroy(g_accel_domain);
2284 
2285 	return rc;
2286 }
2287 
2288 static void
2289 accel_module_finish_cb(void)
2290 {
2291 	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;
2292 
2293 	spdk_memory_domain_destroy(g_accel_domain);
2294 
2295 	cb_fn(g_fini_cb_arg);
2296 	g_fini_cb_fn = NULL;
2297 	g_fini_cb_arg = NULL;
2298 }
2299 
2300 static void
2301 accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
2302 			   const char *module_str)
2303 {
2304 	spdk_json_write_object_begin(w);
2305 	spdk_json_write_named_string(w, "method", "accel_assign_opc");
2306 	spdk_json_write_named_object_begin(w, "params");
2307 	spdk_json_write_named_string(w, "opname", opc_str);
2308 	spdk_json_write_named_string(w, "module", module_str);
2309 	spdk_json_write_object_end(w);
2310 	spdk_json_write_object_end(w);
2311 }
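/*
 * For reference, the JSON emitted above looks like:
 *
 *	{
 *		"method": "accel_assign_opc",
 *		"params": {
 *			"opname": "copy",
 *			"module": "software"
 *		}
 *	}
 */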
2312 
2313 static void
2314 __accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
2315 {
2316 	spdk_json_write_named_string(w, "name", key->param.key_name);
2317 	spdk_json_write_named_string(w, "cipher", key->param.cipher);
2318 	spdk_json_write_named_string(w, "key", key->param.hex_key);
2319 	if (key->param.hex_key2) {
2320 		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
2321 	}
2322 }
2323 
2324 void
2325 _accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
2326 {
2327 	spdk_json_write_object_begin(w);
2328 	__accel_crypto_key_dump_param(w, key);
2329 	spdk_json_write_object_end(w);
2330 }
2331 
2332 static void
2333 _accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
2334 				    struct spdk_accel_crypto_key *key)
2335 {
2336 	spdk_json_write_object_begin(w);
2337 	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
2338 	spdk_json_write_named_object_begin(w, "params");
2339 	__accel_crypto_key_dump_param(w, key);
2340 	spdk_json_write_object_end(w);
2341 	spdk_json_write_object_end(w);
2342 }
2343 
2344 static void
2345 _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
2346 {
2347 	struct spdk_accel_crypto_key *key;
2348 
2349 	spdk_spin_lock(&g_keyring_spin);
2350 	TAILQ_FOREACH(key, &g_keyring, link) {
2351 		if (full_dump) {
2352 			_accel_crypto_key_write_config_json(w, key);
2353 		} else {
2354 			_accel_crypto_key_dump_param(w, key);
2355 		}
2356 	}
2357 	spdk_spin_unlock(&g_keyring_spin);
2358 }
2359 
2360 void
2361 _accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
2362 {
2363 	_accel_crypto_keys_write_config_json(w, false);
2364 }
2365 
2366 void
2367 spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
2368 {
2369 	struct spdk_accel_module_if *accel_module;
2370 	int i;
2371 
2372 	/*
2373 	 * The accel framework has no config of its own; there may
2374 	 * be some in the modules, though.
2375 	 */
2376 	spdk_json_write_array_begin(w);
2377 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2378 		if (accel_module->write_config_json) {
2379 			accel_module->write_config_json(w);
2380 		}
2381 	}
2382 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
2383 		if (g_modules_opc_override[i]) {
2384 			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
2385 		}
2386 	}
2387 
2388 	_accel_crypto_keys_write_config_json(w, true);
2389 
2390 	spdk_json_write_array_end(w);
2391 }
2392 
2393 void
2394 spdk_accel_module_finish(void)
2395 {
2396 	if (!g_accel_module) {
2397 		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
2398 	} else {
2399 		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
2400 	}
2401 
2402 	if (!g_accel_module) {
2403 		spdk_spin_destroy(&g_keyring_spin);
2404 		accel_module_finish_cb();
2405 		return;
2406 	}
2407 
2408 	if (g_accel_module->module_fini) {
2409 		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
2410 	} else {
2411 		spdk_accel_module_finish();
2412 	}
2413 }
2414 
2415 void
2416 spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
2417 {
2418 	struct spdk_accel_crypto_key *key, *key_tmp;
2419 	enum accel_opcode op;
2420 
2421 	assert(cb_fn != NULL);
2422 
2423 	g_fini_cb_fn = cb_fn;
2424 	g_fini_cb_arg = cb_arg;
2425 
2426 	spdk_spin_lock(&g_keyring_spin);
2427 	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
2428 		accel_crypto_key_destroy_unsafe(key);
2429 	}
2430 	spdk_spin_unlock(&g_keyring_spin);
2431 
2432 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
2433 		if (g_modules_opc_override[op] != NULL) {
2434 			free(g_modules_opc_override[op]);
2435 			g_modules_opc_override[op] = NULL;
2436 		}
2437 		g_modules_opc[op].module = NULL;
2438 	}
2439 
2440 	spdk_io_device_unregister(&spdk_accel_module_list, NULL);
2441 	spdk_accel_module_finish();
2442 }
2443 
2444 static struct spdk_accel_driver *
2445 accel_find_driver(const char *name)
2446 {
2447 	struct spdk_accel_driver *driver;
2448 
2449 	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
2450 		if (strcmp(driver->name, name) == 0) {
2451 			return driver;
2452 		}
2453 	}
2454 
2455 	return NULL;
2456 }
2457 
2458 int
2459 spdk_accel_set_driver(const char *name)
2460 {
2461 	struct spdk_accel_driver *driver;
2462 
2463 	driver = accel_find_driver(name);
2464 	if (driver == NULL) {
2465 		SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
2466 		return -ENODEV;
2467 	}
2468 
2469 	g_accel_driver = driver;
2470 
2471 	return 0;
2472 }
2473 
2474 void
2475 spdk_accel_driver_register(struct spdk_accel_driver *driver)
2476 {
2477 	if (accel_find_driver(driver->name)) {
2478 		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
2479 		assert(0);
2480 		return;
2481 	}
2482 
2483 	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
2484 }
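/*
 * A schematic driver registration (the example_* names are hypothetical):
 *
 *	static struct spdk_accel_driver g_example_driver = {
 *		.name			= "example",
 *		.execute_sequence	= example_execute_sequence,
 *	};
 *
 *	spdk_accel_driver_register(&g_example_driver);
 *
 * Once registered, the driver can be selected with spdk_accel_set_driver("example"),
 * after which sequences will be executed through it where possible.
 */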
2485 
2486 SPDK_LOG_REGISTER_COMPONENT(accel)
2487