1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2020 Intel Corporation.
3  *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
4  *   All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk/accel_module.h"
10 
11 #include "accel_internal.h"
12 
13 #include "spdk/dma.h"
14 #include "spdk/env.h"
15 #include "spdk/likely.h"
16 #include "spdk/log.h"
17 #include "spdk/thread.h"
18 #include "spdk/json.h"
19 #include "spdk/crc32.h"
20 #include "spdk/util.h"
21 #include "spdk/hexlify.h"
22 
23 /* Accelerator Framework: This file provides the top-level, generic
24  * API for the accelerator operations defined here. Modules, such as
25  * the one in /module/accel/ioat, supply the implementations, with the
26  * exception of the pure software implementation contained later in
27  * this file.
28  */
29 
30 #define ALIGN_4K			0x1000
31 #define MAX_TASKS_PER_CHANNEL		0x800
32 #define ACCEL_SMALL_CACHE_SIZE		128
33 #define ACCEL_LARGE_CACHE_SIZE		16
34 /* Set MSB, so we don't return NULL pointers as buffers */
35 #define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
36 #define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)
37 
38 #define ACCEL_CRYPTO_TWEAK_MODE_DEFAULT	SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA
39 #define ACCEL_CRYPTO_TWEAK_MODE_CHAR_MAX	32
40 
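/* Associates a registered module with a flag, cached when the framework
 * starts, indicating whether the module supports memory domains (presumably
 * derived from the module's get_memory_domains() callback). */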
41 struct accel_module {
42 	struct spdk_accel_module_if	*module;
43 	bool				supports_memory_domains;
44 };
45 
46 /* Largest context size for all accel modules */
47 static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);
48 
49 static struct spdk_accel_module_if *g_accel_module = NULL;
50 static spdk_accel_fini_cb g_fini_cb_fn = NULL;
51 static void *g_fini_cb_arg = NULL;
52 static bool g_modules_started = false;
53 static struct spdk_memory_domain *g_accel_domain;
54 
55 /* Global list of registered accelerator modules */
56 static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
57 	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);
58 
59 /* Crypto keyring */
60 static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
61 static struct spdk_spinlock g_keyring_spin;
62 
63 /* Global array mapping capabilities to modules */
64 static struct accel_module g_modules_opc[ACCEL_OPC_LAST] = {};
65 static char *g_modules_opc_override[ACCEL_OPC_LAST] = {};
66 TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
67 static struct spdk_accel_driver *g_accel_driver;
68 static struct spdk_accel_opts g_opts = {
69 	.small_cache_size = ACCEL_SMALL_CACHE_SIZE,
70 	.large_cache_size = ACCEL_LARGE_CACHE_SIZE,
71 	.task_count = MAX_TASKS_PER_CHANNEL,
72 	.sequence_count = MAX_TASKS_PER_CHANNEL,
73 	.buf_count = MAX_TASKS_PER_CHANNEL,
74 };
75 static struct accel_stats g_stats;
76 static struct spdk_spinlock g_stats_lock;
77 
78 static const char *g_opcode_strings[ACCEL_OPC_LAST] = {
79 	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
80 	"compress", "decompress", "encrypt", "decrypt", "xor"
81 };
82 
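/* States of the per-sequence execution state machine driven by
 * accel_process_sequence().  The CHECK_* states may advance synchronously,
 * while the AWAIT_* states park the sequence until an asynchronous event
 * (iobuf allocation, memory-domain pull/push, or task completion) resumes
 * it. */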
83 enum accel_sequence_state {
84 	ACCEL_SEQUENCE_STATE_INIT,
85 	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
86 	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
87 	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
88 	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
89 	ACCEL_SEQUENCE_STATE_PULL_DATA,
90 	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
91 	ACCEL_SEQUENCE_STATE_EXEC_TASK,
92 	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
93 	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
94 	ACCEL_SEQUENCE_STATE_NEXT_TASK,
95 	ACCEL_SEQUENCE_STATE_PUSH_DATA,
96 	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
97 	ACCEL_SEQUENCE_STATE_DRIVER_EXEC,
98 	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK,
99 	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE,
100 	ACCEL_SEQUENCE_STATE_ERROR,
101 	ACCEL_SEQUENCE_STATE_MAX,
102 };
103 
104 static const char *g_seq_states[]
105 __attribute__((unused)) = {
106 	[ACCEL_SEQUENCE_STATE_INIT] = "init",
107 	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
108 	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
109 	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
110 	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
111 	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
112 	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
113 	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
114 	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
115 	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
116 	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
117 	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
118 	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
119 	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC] = "driver-exec",
120 	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK] = "driver-await-task",
121 	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE] = "driver-complete",
122 	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
123 	[ACCEL_SEQUENCE_STATE_MAX] = "",
124 };
125 
126 #define ACCEL_SEQUENCE_STATE_STRING(s) \
127 	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
128 	 ? g_seq_states[s] : "unknown")
129 
130 struct accel_buffer {
131 	struct spdk_accel_sequence	*seq;
132 	void				*buf;
133 	uint64_t			len;
134 	struct spdk_iobuf_entry		iobuf;
135 	spdk_accel_sequence_get_buf_cb	cb_fn;
136 	void				*cb_ctx;
137 	TAILQ_ENTRY(accel_buffer)	link;
138 };
139 
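/* Per-thread accel channel: one module I/O channel per opcode, plus
 * preallocated pools of tasks, sequences, and accel buffers (sized according
 * to g_opts) and channel-local statistics. */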
140 struct accel_io_channel {
141 	struct spdk_io_channel			*module_ch[ACCEL_OPC_LAST];
142 	void					*task_pool_base;
143 	struct spdk_accel_sequence		*seq_pool_base;
144 	struct accel_buffer			*buf_pool_base;
145 	TAILQ_HEAD(, spdk_accel_task)		task_pool;
146 	TAILQ_HEAD(, spdk_accel_sequence)	seq_pool;
147 	TAILQ_HEAD(, accel_buffer)		buf_pool;
148 	struct spdk_iobuf_channel		iobuf;
149 	struct accel_stats			stats;
150 };
151 
152 TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);
153 
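/* A chain of accel tasks executed one after another.  Tasks move from
 * `tasks` to `completed` as they finish; `bounce_bufs` tracks buffers
 * borrowed for memory-domain transfers and is drained when the sequence is
 * returned to the pool. */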
154 struct spdk_accel_sequence {
155 	struct accel_io_channel			*ch;
156 	struct accel_sequence_tasks		tasks;
157 	struct accel_sequence_tasks		completed;
158 	TAILQ_HEAD(, accel_buffer)		bounce_bufs;
159 	enum accel_sequence_state		state;
160 	int					status;
161 	bool					in_process_sequence;
162 	spdk_accel_completion_cb		cb_fn;
163 	void					*cb_arg;
164 	TAILQ_ENTRY(spdk_accel_sequence)	link;
165 };
166 
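/* Update a counter in the channel-local stats.  I/O channels are per-thread
 * in SPDK, so these updates need no locking. */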
167 #define accel_update_stats(ch, event, v) \
168 	do { \
169 		(ch)->stats.event += (v); \
170 	} while (0)
171 
172 #define accel_update_task_stats(ch, task, event, v) \
173 	accel_update_stats(ch, operations[(task)->op_code].event, v)
174 
175 static inline void
176 accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
177 {
178 	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
179 		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
180 	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
181 	seq->state = state;
182 }
183 
184 static void
185 accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
186 {
187 	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
188 	assert(status != 0);
189 	seq->status = status;
190 }
191 
192 int
193 spdk_accel_get_opc_module_name(enum accel_opcode opcode, const char **module_name)
194 {
195 	if (opcode >= ACCEL_OPC_LAST) {
196 		/* invalid opcode */
197 		return -EINVAL;
198 	}
199 
200 	if (g_modules_opc[opcode].module) {
201 		*module_name = g_modules_opc[opcode].module->name;
202 	} else {
203 		return -ENOENT;
204 	}
205 
206 	return 0;
207 }
208 
209 void
210 _accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
211 {
212 	struct spdk_accel_module_if *accel_module;
213 	enum accel_opcode opcode;
214 	int j = 0;
215 
216 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
217 		for (opcode = 0; opcode < ACCEL_OPC_LAST; opcode++) {
218 			if (accel_module->supports_opcode(opcode)) {
219 				info->ops[j] = opcode;
220 				j++;
221 			}
222 		}
223 		info->name = accel_module->name;
224 		info->num_ops = j;
225 		fn(info);
226 		j = 0;
227 	}
228 }
229 
230 int
231 _accel_get_opc_name(enum accel_opcode opcode, const char **opcode_name)
232 {
233 	int rc = 0;
234 
235 	if (opcode < ACCEL_OPC_LAST) {
236 		*opcode_name = g_opcode_strings[opcode];
237 	} else {
238 		/* invalid opcode */
239 		rc = -EINVAL;
240 	}
241 
242 	return rc;
243 }
244 
245 int
246 spdk_accel_assign_opc(enum accel_opcode opcode, const char *name)
247 {
248 	if (g_modules_started == true) {
249 		/* we don't allow re-assignment once things have started */
250 		return -EINVAL;
251 	}
252 
253 	if (opcode >= ACCEL_OPC_LAST) {
254 		/* invalid opcode */
255 		return -EINVAL;
256 	}
257 
258 	/* Module selection will be validated after the framework starts. */
259 	g_modules_opc_override[opcode] = strdup(name);
	if (g_modules_opc_override[opcode] == NULL) {
		return -ENOMEM;
	}
260 
261 	return 0;
262 }
263 
264 void
265 spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
266 {
267 	struct accel_io_channel *accel_ch = accel_task->accel_ch;
268 	spdk_accel_completion_cb	cb_fn = accel_task->cb_fn;
269 	void				*cb_arg = accel_task->cb_arg;
270 
271 	/* Return the accel_task to the pool first, so that the pool is not
272 	 * exhausted if the user's callback (cb_fn) recursively allocates
273 	 * another accel_task.
274 	 */
275 	TAILQ_INSERT_HEAD(&accel_ch->task_pool, accel_task, link);
276 
277 	accel_update_task_stats(accel_ch, accel_task, executed, 1);
278 	accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes);
279 	if (spdk_unlikely(status != 0)) {
280 		accel_update_task_stats(accel_ch, accel_task, failed, 1);
281 	}
282 
283 	cb_fn(cb_arg, status);
284 }
285 
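/* Pop a task from the channel's preallocated pool.  Returns NULL when the
 * pool is exhausted, which callers translate into -ENOMEM. */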
286 inline static struct spdk_accel_task *
287 _get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
288 {
289 	struct spdk_accel_task *accel_task;
290 
291 	accel_task = TAILQ_FIRST(&accel_ch->task_pool);
292 	if (accel_task == NULL) {
293 		return NULL;
294 	}
295 
296 	TAILQ_REMOVE(&accel_ch->task_pool, accel_task, link);
297 	accel_task->link.tqe_next = NULL;
298 	accel_task->link.tqe_prev = NULL;
299 
300 	accel_task->cb_fn = cb_fn;
301 	accel_task->cb_arg = cb_arg;
302 	accel_task->accel_ch = accel_ch;
303 	accel_task->bounce.s.orig_iovs = NULL;
304 	accel_task->bounce.d.orig_iovs = NULL;
305 
306 	return accel_task;
307 }
308 
309 static inline int
310 accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
311 {
312 	struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
313 	struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
314 	int rc;
315 
316 	rc = module->submit_tasks(module_ch, task);
317 	if (spdk_unlikely(rc != 0)) {
318 		accel_update_task_stats(accel_ch, task, failed, 1);
319 	}
320 
321 	return rc;
322 }
323 
324 static inline uint64_t
325 accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
326 {
327 	uint64_t result = 0;
328 	uint32_t i;
329 
330 	for (i = 0; i < iovcnt; ++i) {
331 		result += iovs[i].iov_len;
332 	}
333 
334 	return result;
335 }
336 
337 /* Accel framework public API for copy function */
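/* Example usage (illustrative sketch; assumes `ch` was obtained via
 * spdk_accel_get_io_channel() and `copy_done` is a caller-defined
 * spdk_accel_completion_cb).  A return value of -ENOMEM means the channel's
 * task pool is exhausted and the caller should retry later:
 *
 *	rc = spdk_accel_submit_copy(ch, dst, src, 4096, 0, copy_done, cb_ctx);
 */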
338 int
339 spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
340 		       uint64_t nbytes, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
341 {
342 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
343 	struct spdk_accel_task *accel_task;
344 
345 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
346 	if (accel_task == NULL) {
347 		return -ENOMEM;
348 	}
349 
350 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
351 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
352 	accel_task->d.iovs[0].iov_base = dst;
353 	accel_task->d.iovs[0].iov_len = nbytes;
354 	accel_task->d.iovcnt = 1;
355 	accel_task->s.iovs[0].iov_base = src;
356 	accel_task->s.iovs[0].iov_len = nbytes;
357 	accel_task->s.iovcnt = 1;
358 	accel_task->nbytes = nbytes;
359 	accel_task->op_code = ACCEL_OPC_COPY;
360 	accel_task->flags = flags;
361 	accel_task->src_domain = NULL;
362 	accel_task->dst_domain = NULL;
363 	accel_task->step_cb_fn = NULL;
364 
365 	return accel_submit_task(accel_ch, accel_task);
366 }
367 
368 /* Accel framework public API for dual cast copy function */
369 int
370 spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
371 			   void *dst2, void *src, uint64_t nbytes, int flags,
372 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
373 {
374 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
375 	struct spdk_accel_task *accel_task;
376 
377 	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
378 		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
379 		return -EINVAL;
380 	}
381 
382 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
383 	if (accel_task == NULL) {
384 		return -ENOMEM;
385 	}
386 
387 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
388 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
389 	accel_task->d2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST2];
390 	accel_task->d.iovs[0].iov_base = dst1;
391 	accel_task->d.iovs[0].iov_len = nbytes;
392 	accel_task->d.iovcnt = 1;
393 	accel_task->d2.iovs[0].iov_base = dst2;
394 	accel_task->d2.iovs[0].iov_len = nbytes;
395 	accel_task->d2.iovcnt = 1;
396 	accel_task->s.iovs[0].iov_base = src;
397 	accel_task->s.iovs[0].iov_len = nbytes;
398 	accel_task->s.iovcnt = 1;
399 	accel_task->nbytes = nbytes;
400 	accel_task->flags = flags;
401 	accel_task->op_code = ACCEL_OPC_DUALCAST;
402 	accel_task->src_domain = NULL;
403 	accel_task->dst_domain = NULL;
404 	accel_task->step_cb_fn = NULL;
405 
406 	return accel_submit_task(accel_ch, accel_task);
407 }
408 
409 /* Accel framework public API for compare function */
410 int
411 spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
412 			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
413 			  void *cb_arg)
414 {
415 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
416 	struct spdk_accel_task *accel_task;
417 
418 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
419 	if (accel_task == NULL) {
420 		return -ENOMEM;
421 	}
422 
423 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
424 	accel_task->s2.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC2];
425 	accel_task->s.iovs[0].iov_base = src1;
426 	accel_task->s.iovs[0].iov_len = nbytes;
427 	accel_task->s.iovcnt = 1;
428 	accel_task->s2.iovs[0].iov_base = src2;
429 	accel_task->s2.iovs[0].iov_len = nbytes;
430 	accel_task->s2.iovcnt = 1;
431 	accel_task->nbytes = nbytes;
432 	accel_task->op_code = ACCEL_OPC_COMPARE;
433 	accel_task->src_domain = NULL;
434 	accel_task->dst_domain = NULL;
435 	accel_task->step_cb_fn = NULL;
436 
437 	return accel_submit_task(accel_ch, accel_task);
438 }
439 
440 /* Accel framework public API for fill function */
441 int
442 spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
443 		       uint8_t fill, uint64_t nbytes, int flags,
444 		       spdk_accel_completion_cb cb_fn, void *cb_arg)
445 {
446 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
447 	struct spdk_accel_task *accel_task;
448 
449 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
450 	if (accel_task == NULL) {
451 		return -ENOMEM;
452 	}
453 
454 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
455 	accel_task->d.iovs[0].iov_base = dst;
456 	accel_task->d.iovs[0].iov_len = nbytes;
457 	accel_task->d.iovcnt = 1;
458 	accel_task->nbytes = nbytes;
459 	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
460 	accel_task->flags = flags;
461 	accel_task->op_code = ACCEL_OPC_FILL;
462 	accel_task->src_domain = NULL;
463 	accel_task->dst_domain = NULL;
464 	accel_task->step_cb_fn = NULL;
465 
466 	return accel_submit_task(accel_ch, accel_task);
467 }
468 
469 /* Accel framework public API for CRC-32C function */
470 int
471 spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
472 			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
473 			 void *cb_arg)
474 {
475 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
476 	struct spdk_accel_task *accel_task;
477 
478 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
479 	if (accel_task == NULL) {
480 		return -ENOMEM;
481 	}
482 
483 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
484 	accel_task->s.iovs[0].iov_base = src;
485 	accel_task->s.iovs[0].iov_len = nbytes;
486 	accel_task->s.iovcnt = 1;
487 	accel_task->nbytes = nbytes;
488 	accel_task->crc_dst = crc_dst;
489 	accel_task->seed = seed;
490 	accel_task->op_code = ACCEL_OPC_CRC32C;
491 	accel_task->src_domain = NULL;
492 	accel_task->dst_domain = NULL;
493 	accel_task->step_cb_fn = NULL;
494 
495 	return accel_submit_task(accel_ch, accel_task);
496 }
497 
498 /* Accel framework public API for chained CRC-32C function */
499 int
500 spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
501 			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
502 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
503 {
504 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
505 	struct spdk_accel_task *accel_task;
506 
507 	if (iov == NULL) {
508 		SPDK_ERRLOG("iov should not be NULL\n");
509 		return -EINVAL;
510 	}
511 
512 	if (!iov_cnt) {
513 		SPDK_ERRLOG("iovcnt should not be zero\n");
514 		return -EINVAL;
515 	}
516 
517 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
518 	if (accel_task == NULL) {
519 		SPDK_ERRLOG("no memory\n");
520 		assert(0);
521 		return -ENOMEM;
522 	}
523 
524 	accel_task->s.iovs = iov;
525 	accel_task->s.iovcnt = iov_cnt;
526 	accel_task->nbytes = accel_get_iovlen(iov, iov_cnt);
527 	accel_task->crc_dst = crc_dst;
528 	accel_task->seed = seed;
529 	accel_task->op_code = ACCEL_OPC_CRC32C;
530 	accel_task->src_domain = NULL;
531 	accel_task->dst_domain = NULL;
532 	accel_task->step_cb_fn = NULL;
533 
534 	return accel_submit_task(accel_ch, accel_task);
535 }
536 
537 /* Accel framework public API for copy with CRC-32C function */
538 int
539 spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
540 			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
541 			      int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
542 {
543 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
544 	struct spdk_accel_task *accel_task;
545 
546 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
547 	if (accel_task == NULL) {
548 		return -ENOMEM;
549 	}
550 
551 	accel_task->s.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_SRC];
552 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
553 	accel_task->d.iovs[0].iov_base = dst;
554 	accel_task->d.iovs[0].iov_len = nbytes;
555 	accel_task->d.iovcnt = 1;
556 	accel_task->s.iovs[0].iov_base = src;
557 	accel_task->s.iovs[0].iov_len = nbytes;
558 	accel_task->s.iovcnt = 1;
559 	accel_task->nbytes = nbytes;
560 	accel_task->crc_dst = crc_dst;
561 	accel_task->seed = seed;
562 	accel_task->flags = flags;
563 	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;
564 	accel_task->src_domain = NULL;
565 	accel_task->dst_domain = NULL;
566 	accel_task->step_cb_fn = NULL;
567 
568 	return accel_submit_task(accel_ch, accel_task);
569 }
570 
571 /* Accel framework public API for chained copy + CRC-32C function */
572 int
573 spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
574 			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
575 			       uint32_t seed, int flags, spdk_accel_completion_cb cb_fn, void *cb_arg)
576 {
577 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
578 	struct spdk_accel_task *accel_task;
579 	uint64_t nbytes;
580 
581 	if (src_iovs == NULL) {
582 		SPDK_ERRLOG("iov should not be NULL\n");
583 		return -EINVAL;
584 	}
585 
586 	if (!iov_cnt) {
587 		SPDK_ERRLOG("iovcnt should not be zero\n");
588 		return -EINVAL;
589 	}
590 
591 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
592 	if (accel_task == NULL) {
593 		SPDK_ERRLOG("no memory\n");
594 		assert(0);
595 		return -ENOMEM;
596 	}
597 
598 	nbytes = accel_get_iovlen(src_iovs, iov_cnt);
599 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
600 	accel_task->d.iovs[0].iov_base = dst;
601 	accel_task->d.iovs[0].iov_len = nbytes;
602 	accel_task->d.iovcnt = 1;
603 	accel_task->s.iovs = src_iovs;
604 	accel_task->s.iovcnt = iov_cnt;
605 	accel_task->nbytes = nbytes;
606 	accel_task->crc_dst = crc_dst;
607 	accel_task->seed = seed;
608 	accel_task->flags = flags;
609 	accel_task->op_code = ACCEL_OPC_COPY_CRC32C;
610 	accel_task->src_domain = NULL;
611 	accel_task->dst_domain = NULL;
612 	accel_task->step_cb_fn = NULL;
613 
614 	return accel_submit_task(accel_ch, accel_task);
615 }
616 
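/* Accel framework public API for compress function; if non-NULL, output_size
 * receives the compressed size upon completion */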
617 int
618 spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
619 			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size, int flags,
620 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
621 {
622 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
623 	struct spdk_accel_task *accel_task;
624 
625 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
626 	if (accel_task == NULL) {
627 		return -ENOMEM;
628 	}
629 
630 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
631 	accel_task->d.iovs[0].iov_base = dst;
632 	accel_task->d.iovs[0].iov_len = nbytes;
633 	accel_task->d.iovcnt = 1;
634 	accel_task->output_size = output_size;
635 	accel_task->s.iovs = src_iovs;
636 	accel_task->s.iovcnt = src_iovcnt;
637 	accel_task->nbytes = nbytes;
638 	accel_task->flags = flags;
639 	accel_task->op_code = ACCEL_OPC_COMPRESS;
640 	accel_task->src_domain = NULL;
641 	accel_task->dst_domain = NULL;
642 	accel_task->step_cb_fn = NULL;
643 
644 	return accel_submit_task(accel_ch, accel_task);
645 }
646 
647 int
648 spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
649 			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
650 			     uint32_t *output_size, int flags, spdk_accel_completion_cb cb_fn,
651 			     void *cb_arg)
652 {
653 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
654 	struct spdk_accel_task *accel_task;
655 
656 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
657 	if (accel_task == NULL) {
658 		return -ENOMEM;
659 	}
660 
661 	accel_task->output_size = output_size;
662 	accel_task->s.iovs = src_iovs;
663 	accel_task->s.iovcnt = src_iovcnt;
664 	accel_task->d.iovs = dst_iovs;
665 	accel_task->d.iovcnt = dst_iovcnt;
666 	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
667 	accel_task->flags = flags;
668 	accel_task->op_code = ACCEL_OPC_DECOMPRESS;
669 	accel_task->src_domain = NULL;
670 	accel_task->dst_domain = NULL;
671 	accel_task->step_cb_fn = NULL;
672 
673 	return accel_submit_task(accel_ch, accel_task);
674 }
675 
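/* Accel framework public API for encrypt function; nbytes is derived from
 * src_iovs, and the key is expected to come from the accel crypto keyring
 * (e.g. created via spdk_accel_crypto_key_create()) */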
676 int
677 spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
678 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
679 			  struct iovec *src_iovs, uint32_t src_iovcnt,
680 			  uint64_t iv, uint32_t block_size, int flags,
681 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
682 {
683 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
684 	struct spdk_accel_task *accel_task;
685 
686 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
687 		return -EINVAL;
688 	}
689 
690 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
691 	if (accel_task == NULL) {
692 		return -ENOMEM;
693 	}
694 
695 	accel_task->crypto_key = key;
696 	accel_task->s.iovs = src_iovs;
697 	accel_task->s.iovcnt = src_iovcnt;
698 	accel_task->d.iovs = dst_iovs;
699 	accel_task->d.iovcnt = dst_iovcnt;
700 	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
701 	accel_task->iv = iv;
702 	accel_task->block_size = block_size;
703 	accel_task->flags = flags;
704 	accel_task->op_code = ACCEL_OPC_ENCRYPT;
705 	accel_task->src_domain = NULL;
706 	accel_task->dst_domain = NULL;
707 	accel_task->step_cb_fn = NULL;
708 
709 	return accel_submit_task(accel_ch, accel_task);
710 }
711 
712 int
713 spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
714 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
715 			  struct iovec *src_iovs, uint32_t src_iovcnt,
716 			  uint64_t iv, uint32_t block_size, int flags,
717 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
718 {
719 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
720 	struct spdk_accel_task *accel_task;
721 
722 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
723 		return -EINVAL;
724 	}
725 
726 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
727 	if (accel_task == NULL) {
728 		return -ENOMEM;
729 	}
730 
731 	accel_task->crypto_key = key;
732 	accel_task->s.iovs = src_iovs;
733 	accel_task->s.iovcnt = src_iovcnt;
734 	accel_task->d.iovs = dst_iovs;
735 	accel_task->d.iovcnt = dst_iovcnt;
736 	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
737 	accel_task->iv = iv;
738 	accel_task->block_size = block_size;
739 	accel_task->flags = flags;
740 	accel_task->op_code = ACCEL_OPC_DECRYPT;
741 	accel_task->src_domain = NULL;
742 	accel_task->dst_domain = NULL;
743 	accel_task->step_cb_fn = NULL;
744 
745 	return accel_submit_task(accel_ch, accel_task);
746 }
747 
748 int
749 spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
750 		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
751 {
752 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
753 	struct spdk_accel_task *accel_task;
754 
755 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
756 	if (accel_task == NULL) {
757 		return -ENOMEM;
758 	}
759 
760 	accel_task->nsrcs.srcs = sources;
761 	accel_task->nsrcs.cnt = nsrcs;
762 	accel_task->d.iovs = &accel_task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
763 	accel_task->d.iovs[0].iov_base = dst;
764 	accel_task->d.iovs[0].iov_len = nbytes;
765 	accel_task->d.iovcnt = 1;
766 	accel_task->nbytes = nbytes;
767 	accel_task->op_code = ACCEL_OPC_XOR;
768 	accel_task->src_domain = NULL;
769 	accel_task->dst_domain = NULL;
770 	accel_task->step_cb_fn = NULL;
771 
772 	return accel_submit_task(accel_ch, accel_task);
773 }
774 
775 static inline struct accel_buffer *
776 accel_get_buf(struct accel_io_channel *ch, uint64_t len)
777 {
778 	struct accel_buffer *buf;
779 
780 	buf = TAILQ_FIRST(&ch->buf_pool);
781 	if (spdk_unlikely(buf == NULL)) {
782 		return NULL;
783 	}
784 
785 	TAILQ_REMOVE(&ch->buf_pool, buf, link);
786 	buf->len = len;
787 	buf->buf = NULL;
788 	buf->seq = NULL;
789 	buf->cb_fn = NULL;
790 
791 	return buf;
792 }
793 
794 static inline void
795 accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
796 {
797 	if (buf->buf != NULL) {
798 		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
799 	}
800 
801 	TAILQ_INSERT_HEAD(&ch->buf_pool, buf, link);
802 }
803 
804 static inline struct spdk_accel_sequence *
805 accel_sequence_get(struct accel_io_channel *ch)
806 {
807 	struct spdk_accel_sequence *seq;
808 
809 	seq = TAILQ_FIRST(&ch->seq_pool);
810 	if (seq == NULL) {
811 		return NULL;
812 	}
813 
814 	TAILQ_REMOVE(&ch->seq_pool, seq, link);
815 
816 	TAILQ_INIT(&seq->tasks);
817 	TAILQ_INIT(&seq->completed);
818 	TAILQ_INIT(&seq->bounce_bufs);
819 
820 	seq->ch = ch;
821 	seq->status = 0;
822 	seq->state = ACCEL_SEQUENCE_STATE_INIT;
823 	seq->in_process_sequence = false;
824 
825 	return seq;
826 }
827 
828 static inline void
829 accel_sequence_put(struct spdk_accel_sequence *seq)
830 {
831 	struct accel_io_channel *ch = seq->ch;
832 	struct accel_buffer *buf;
833 
834 	while (!TAILQ_EMPTY(&seq->bounce_bufs)) {
835 		buf = TAILQ_FIRST(&seq->bounce_bufs);
836 		TAILQ_REMOVE(&seq->bounce_bufs, buf, link);
837 		accel_put_buf(seq->ch, buf);
838 	}
839 
840 	assert(TAILQ_EMPTY(&seq->tasks));
841 	assert(TAILQ_EMPTY(&seq->completed));
842 	seq->ch = NULL;
843 
844 	TAILQ_INSERT_HEAD(&ch->seq_pool, seq, link);
845 }
846 
847 static void accel_sequence_task_cb(void *cb_arg, int status);
848 
849 static inline struct spdk_accel_task *
850 accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
851 			spdk_accel_step_cb cb_fn, void *cb_arg)
852 {
853 	struct spdk_accel_task *task;
854 
855 	task = _get_task(ch, accel_sequence_task_cb, seq);
856 	if (task == NULL) {
857 		return task;
858 	}
859 
860 	task->step_cb_fn = cb_fn;
861 	task->step_cb_arg = cb_arg;
862 
863 	return task;
864 }
865 
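/* The spdk_accel_append_*() APIs below build a sequence lazily: when *pseq is
 * NULL, a new sequence is taken from the channel's pool and returned through
 * pseq.  Illustrative sketch (error handling elided; assumes the public
 * spdk_accel_sequence_finish() API is used to execute the sequence, and
 * fill_done/copy_done/seq_done are caller-defined callbacks):
 *
 *	struct spdk_accel_sequence *seq = NULL;
 *
 *	spdk_accel_append_fill(&seq, ch, buf, len, NULL, NULL, 0xff, 0,
 *			       fill_done, NULL);
 *	spdk_accel_append_copy(&seq, ch, &dst_iov, 1, NULL, NULL, &src_iov, 1,
 *			       NULL, NULL, 0, copy_done, NULL);
 *	spdk_accel_sequence_finish(seq, seq_done, NULL);
 */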
866 int
867 spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
868 		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
869 		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
870 		       struct iovec *src_iovs, uint32_t src_iovcnt,
871 		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
872 		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
873 {
874 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
875 	struct spdk_accel_task *task;
876 	struct spdk_accel_sequence *seq = *pseq;
877 
878 	if (seq == NULL) {
879 		seq = accel_sequence_get(accel_ch);
880 		if (spdk_unlikely(seq == NULL)) {
881 			return -ENOMEM;
882 		}
883 	}
884 
885 	assert(seq->ch == accel_ch);
886 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
887 	if (spdk_unlikely(task == NULL)) {
888 		if (*pseq == NULL) {
889 			accel_sequence_put(seq);
890 		}
891 
892 		return -ENOMEM;
893 	}
894 
895 	task->dst_domain = dst_domain;
896 	task->dst_domain_ctx = dst_domain_ctx;
897 	task->d.iovs = dst_iovs;
898 	task->d.iovcnt = dst_iovcnt;
899 	task->src_domain = src_domain;
900 	task->src_domain_ctx = src_domain_ctx;
901 	task->s.iovs = src_iovs;
902 	task->s.iovcnt = src_iovcnt;
903 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
904 	task->flags = flags;
905 	task->op_code = ACCEL_OPC_COPY;
906 
907 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
908 	*pseq = seq;
909 
910 	return 0;
911 }
912 
913 int
914 spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
915 		       void *buf, uint64_t len,
916 		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
917 		       int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
918 {
919 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
920 	struct spdk_accel_task *task;
921 	struct spdk_accel_sequence *seq = *pseq;
922 
923 	if (seq == NULL) {
924 		seq = accel_sequence_get(accel_ch);
925 		if (spdk_unlikely(seq == NULL)) {
926 			return -ENOMEM;
927 		}
928 	}
929 
930 	assert(seq->ch == accel_ch);
931 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
932 	if (spdk_unlikely(task == NULL)) {
933 		if (*pseq == NULL) {
934 			accel_sequence_put(seq);
935 		}
936 
937 		return -ENOMEM;
938 	}
939 
940 	memset(&task->fill_pattern, pattern, sizeof(uint64_t));
941 
942 	task->d.iovs = &task->aux_iovs[SPDK_ACCEL_AUX_IOV_DST];
943 	task->d.iovs[0].iov_base = buf;
944 	task->d.iovs[0].iov_len = len;
945 	task->d.iovcnt = 1;
946 	task->nbytes = len;
947 	task->src_domain = NULL;
948 	task->dst_domain = domain;
949 	task->dst_domain_ctx = domain_ctx;
950 	task->flags = flags;
951 	task->op_code = ACCEL_OPC_FILL;
952 
953 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
954 	*pseq = seq;
955 
956 	return 0;
957 }
958 
959 int
960 spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
961 			     struct iovec *dst_iovs, size_t dst_iovcnt,
962 			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
963 			     struct iovec *src_iovs, size_t src_iovcnt,
964 			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
965 			     int flags, spdk_accel_step_cb cb_fn, void *cb_arg)
966 {
967 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
968 	struct spdk_accel_task *task;
969 	struct spdk_accel_sequence *seq = *pseq;
970 
971 	if (seq == NULL) {
972 		seq = accel_sequence_get(accel_ch);
973 		if (spdk_unlikely(seq == NULL)) {
974 			return -ENOMEM;
975 		}
976 	}
977 
978 	assert(seq->ch == accel_ch);
979 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
980 	if (spdk_unlikely(task == NULL)) {
981 		if (*pseq == NULL) {
982 			accel_sequence_put(seq);
983 		}
984 
985 		return -ENOMEM;
986 	}
987 
988 	/* TODO: support output_size for chaining */
989 	task->output_size = NULL;
990 	task->dst_domain = dst_domain;
991 	task->dst_domain_ctx = dst_domain_ctx;
992 	task->d.iovs = dst_iovs;
993 	task->d.iovcnt = dst_iovcnt;
994 	task->src_domain = src_domain;
995 	task->src_domain_ctx = src_domain_ctx;
996 	task->s.iovs = src_iovs;
997 	task->s.iovcnt = src_iovcnt;
998 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
999 	task->flags = flags;
1000 	task->op_code = ACCEL_OPC_DECOMPRESS;
1001 
1002 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1003 	*pseq = seq;
1004 
1005 	return 0;
1006 }
1007 
1008 int
1009 spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1010 			  struct spdk_accel_crypto_key *key,
1011 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
1012 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1013 			  struct iovec *src_iovs, uint32_t src_iovcnt,
1014 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1015 			  uint64_t iv, uint32_t block_size, int flags,
1016 			  spdk_accel_step_cb cb_fn, void *cb_arg)
1017 {
1018 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1019 	struct spdk_accel_task *task;
1020 	struct spdk_accel_sequence *seq = *pseq;
1021 
1022 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key ||
1023 			  !block_size)) {
1024 		return -EINVAL;
1025 	}
1026 
1027 	if (seq == NULL) {
1028 		seq = accel_sequence_get(accel_ch);
1029 		if (spdk_unlikely(seq == NULL)) {
1030 			return -ENOMEM;
1031 		}
1032 	}
1033 
1034 	assert(seq->ch == accel_ch);
1035 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1036 	if (spdk_unlikely(task == NULL)) {
1037 		if (*pseq == NULL) {
1038 			accel_sequence_put(seq);
1039 		}
1040 
1041 		return -ENOMEM;
1042 	}
1043 
1044 	task->crypto_key = key;
1045 	task->src_domain = src_domain;
1046 	task->src_domain_ctx = src_domain_ctx;
1047 	task->s.iovs = src_iovs;
1048 	task->s.iovcnt = src_iovcnt;
1049 	task->dst_domain = dst_domain;
1050 	task->dst_domain_ctx = dst_domain_ctx;
1051 	task->d.iovs = dst_iovs;
1052 	task->d.iovcnt = dst_iovcnt;
1053 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1054 	task->iv = iv;
1055 	task->block_size = block_size;
1056 	task->flags = flags;
1057 	task->op_code = ACCEL_OPC_ENCRYPT;
1058 
1059 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1060 	*pseq = seq;
1061 
1062 	return 0;
1063 }
1064 
1065 int
1066 spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1067 			  struct spdk_accel_crypto_key *key,
1068 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
1069 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1070 			  struct iovec *src_iovs, uint32_t src_iovcnt,
1071 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1072 			  uint64_t iv, uint32_t block_size, int flags,
1073 			  spdk_accel_step_cb cb_fn, void *cb_arg)
1074 {
1075 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1076 	struct spdk_accel_task *task;
1077 	struct spdk_accel_sequence *seq = *pseq;
1078 
1079 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key ||
1080 			  !block_size)) {
1081 		return -EINVAL;
1082 	}
1083 
1084 	if (seq == NULL) {
1085 		seq = accel_sequence_get(accel_ch);
1086 		if (spdk_unlikely(seq == NULL)) {
1087 			return -ENOMEM;
1088 		}
1089 	}
1090 
1091 	assert(seq->ch == accel_ch);
1092 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1093 	if (spdk_unlikely(task == NULL)) {
1094 		if (*pseq == NULL) {
1095 			accel_sequence_put(seq);
1096 		}
1097 
1098 		return -ENOMEM;
1099 	}
1100 
1101 	task->crypto_key = key;
1102 	task->src_domain = src_domain;
1103 	task->src_domain_ctx = src_domain_ctx;
1104 	task->s.iovs = src_iovs;
1105 	task->s.iovcnt = src_iovcnt;
1106 	task->dst_domain = dst_domain;
1107 	task->dst_domain_ctx = dst_domain_ctx;
1108 	task->d.iovs = dst_iovs;
1109 	task->d.iovcnt = dst_iovcnt;
1110 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1111 	task->iv = iv;
1112 	task->block_size = block_size;
1113 	task->flags = flags;
1114 	task->op_code = ACCEL_OPC_DECRYPT;
1115 
1116 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1117 	*pseq = seq;
1118 
1119 	return 0;
1120 }
1121 
1122 int
1123 spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1124 			 uint32_t *dst, struct iovec *iovs, uint32_t iovcnt,
1125 			 struct spdk_memory_domain *domain, void *domain_ctx,
1126 			 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg)
1127 {
1128 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1129 	struct spdk_accel_task *task;
1130 	struct spdk_accel_sequence *seq = *pseq;
1131 
1132 	if (seq == NULL) {
1133 		seq = accel_sequence_get(accel_ch);
1134 		if (spdk_unlikely(seq == NULL)) {
1135 			return -ENOMEM;
1136 		}
1137 	}
1138 
1139 	assert(seq->ch == accel_ch);
1140 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1141 	if (spdk_unlikely(task == NULL)) {
1142 		if (*pseq == NULL) {
1143 			accel_sequence_put(seq);
1144 		}
1145 
1146 		return -ENOMEM;
1147 	}
1148 
1149 	task->s.iovs = iovs;
1150 	task->s.iovcnt = iovcnt;
1151 	task->src_domain = domain;
1152 	task->src_domain_ctx = domain_ctx;
1153 	task->nbytes = accel_get_iovlen(iovs, iovcnt);
1154 	task->crc_dst = dst;
1155 	task->seed = seed;
1156 	task->op_code = ACCEL_OPC_CRC32C;
1157 	task->dst_domain = NULL;
1158 
1159 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1160 	*pseq = seq;
1161 
1162 	return 0;
1163 }
1164 
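/* Illustrative usage of the opaque accel-buffer API (sketch; error handling
 * elided).  The returned `buf` is a placeholder address, not directly
 * dereferenceable; real memory is allocated from the iobuf pool only when a
 * sequence using it is executed:
 *
 *	void *buf;
 *	struct spdk_memory_domain *domain;
 *	void *domain_ctx;
 *
 *	spdk_accel_get_buf(ch, len, &buf, &domain, &domain_ctx);
 *	... pass buf/domain/domain_ctx to spdk_accel_append_*() calls ...
 *	spdk_accel_put_buf(ch, buf, domain, domain_ctx);
 */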
1165 int
1166 spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
1167 		   struct spdk_memory_domain **domain, void **domain_ctx)
1168 {
1169 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1170 	struct accel_buffer *accel_buf;
1171 
1172 	accel_buf = accel_get_buf(accel_ch, len);
1173 	if (spdk_unlikely(accel_buf == NULL)) {
1174 		return -ENOMEM;
1175 	}
1176 
1177 	/* We always return the same pointer and identify the buffers through domain_ctx */
1178 	*buf = ACCEL_BUFFER_BASE;
1179 	*domain_ctx = accel_buf;
1180 	*domain = g_accel_domain;
1181 
1182 	return 0;
1183 }
1184 
1185 void
1186 spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
1187 		   struct spdk_memory_domain *domain, void *domain_ctx)
1188 {
1189 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1190 	struct accel_buffer *accel_buf = domain_ctx;
1191 
1192 	assert(domain == g_accel_domain);
1193 	assert(buf == ACCEL_BUFFER_BASE);
1194 
1195 	accel_put_buf(accel_ch, accel_buf);
1196 }
1197 
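/* Return all of a sequence's tasks to the pool and invoke their per-step
 * callbacks.  As in spdk_accel_task_complete(), each task is recycled before
 * its callback runs, so the callback may safely allocate a new task. */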
1198 static void
1199 accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
1200 {
1201 	struct spdk_accel_task *task;
1202 	struct accel_io_channel *ch = seq->ch;
1203 	spdk_accel_step_cb cb_fn;
1204 	void *cb_arg;
1205 
1206 	while (!TAILQ_EMPTY(&seq->completed)) {
1207 		task = TAILQ_FIRST(&seq->completed);
1208 		TAILQ_REMOVE(&seq->completed, task, seq_link);
1209 		cb_fn = task->step_cb_fn;
1210 		cb_arg = task->step_cb_arg;
1211 		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
1212 		if (cb_fn != NULL) {
1213 			cb_fn(cb_arg);
1214 		}
1215 	}
1216 
1217 	while (!TAILQ_EMPTY(&seq->tasks)) {
1218 		task = TAILQ_FIRST(&seq->tasks);
1219 		TAILQ_REMOVE(&seq->tasks, task, seq_link);
1220 		cb_fn = task->step_cb_fn;
1221 		cb_arg = task->step_cb_arg;
1222 		TAILQ_INSERT_HEAD(&ch->task_pool, task, link);
1223 		if (cb_fn != NULL) {
1224 			cb_fn(cb_arg);
1225 		}
1226 	}
1227 }
1228 
1229 static void
1230 accel_sequence_complete(struct spdk_accel_sequence *seq)
1231 {
1232 	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, seq->status);
1233 
1234 	accel_update_stats(seq->ch, sequence_executed, 1);
1235 	if (spdk_unlikely(seq->status != 0)) {
1236 		accel_update_stats(seq->ch, sequence_failed, 1);
1237 	}
1238 
1239 	/* First, notify all users who appended operations to this sequence */
1240 	accel_sequence_complete_tasks(seq);
1241 
1242 	/* Then notify the user who finished the sequence */
1243 	seq->cb_fn(seq->cb_arg, seq->status);
1244 
1245 	accel_sequence_put(seq);
1246 }
1247 
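/* Translate an iovec pointing into the placeholder ACCEL_BUFFER_BASE address
 * space into the real iobuf-backed buffer, preserving the offset. */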
1248 static void
1249 accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
1250 {
1251 	uintptr_t offset;
1252 
1253 	offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
1254 	assert(offset < accel_buf->len);
1255 
1256 	diov->iov_base = (char *)accel_buf->buf + offset;
1257 	diov->iov_len = siov->iov_len;
1258 }
1259 
1260 static void
1261 accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
1262 {
1263 	struct spdk_accel_task *task;
1264 	struct iovec *iov;
1265 
1266 	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
1267 	 * in a sequence that were using it.
1268 	 */
1269 	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
1270 		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
1271 			iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
1272 			assert(task->s.iovcnt == 1);
1273 			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
1274 			task->src_domain = NULL;
1275 			task->s.iovs = iov;
1276 		}
1277 		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
1278 			iov = &task->aux_iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
1279 			assert(task->d.iovcnt == 1);
1280 			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
1281 			task->dst_domain = NULL;
1282 			task->d.iovs = iov;
1283 		}
1284 	}
1285 }
1286 
1287 static void accel_process_sequence(struct spdk_accel_sequence *seq);
1288 
1289 static void
1290 accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
1291 {
1292 	struct accel_buffer *accel_buf;
1293 
1294 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1295 
1296 	assert(accel_buf->seq != NULL);
1297 	assert(accel_buf->buf == NULL);
1298 	accel_buf->buf = buf;
1299 
1300 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1301 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
1302 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1303 	accel_process_sequence(accel_buf->seq);
1304 }
1305 
1306 static bool
1307 accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
1308 			 spdk_iobuf_get_cb cb_fn)
1309 {
1310 	struct accel_io_channel *ch = seq->ch;
1311 
1312 	assert(buf->buf == NULL);
1313 	assert(buf->seq == NULL);
1314 
1315 	buf->seq = seq;
1316 	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
1317 	if (buf->buf == NULL) {
1318 		return false;
1319 	}
1320 
1321 	return true;
1322 }
1323 
1324 static bool
1325 accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1326 {
1327 	/* If a task doesn't have a dst/src buffer (e.g. fill, crc32c), its dst/src domain should
1328 	 * be set to NULL */
1329 	if (task->src_domain == g_accel_domain) {
1330 		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
1331 					      accel_iobuf_get_virtbuf_cb)) {
1332 			return false;
1333 		}
1334 
1335 		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
1336 	}
1337 
1338 	if (task->dst_domain == g_accel_domain) {
1339 		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
1340 					      accel_iobuf_get_virtbuf_cb)) {
1341 			return false;
1342 		}
1343 
1344 		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
1345 	}
1346 
1347 	return true;
1348 }
1349 
1350 static void
1351 accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
1352 {
1353 	struct accel_buffer *accel_buf;
1354 
1355 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1356 
1357 	assert(accel_buf->seq != NULL);
1358 	assert(accel_buf->buf == NULL);
1359 	accel_buf->buf = buf;
1360 
1361 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1362 	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
1363 }
1364 
1365 bool
1366 spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
1367 			      struct spdk_memory_domain *domain, void *domain_ctx,
1368 			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
1369 {
1370 	struct accel_buffer *accel_buf = domain_ctx;
1371 
1372 	assert(domain == g_accel_domain);
1373 	accel_buf->cb_fn = cb_fn;
1374 	accel_buf->cb_ctx = cb_ctx;
1375 
1376 	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
1377 		return false;
1378 	}
1379 
1380 	accel_sequence_set_virtbuf(seq, accel_buf);
1381 
1382 	return true;
1383 }
1384 
1385 struct spdk_accel_task *
1386 spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
1387 {
1388 	return TAILQ_FIRST(&seq->tasks);
1389 }
1390 
1391 struct spdk_accel_task *
1392 spdk_accel_sequence_next_task(struct spdk_accel_task *task)
1393 {
1394 	return TAILQ_NEXT(task, seq_link);
1395 }
1396 
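/* Point a task's iovecs at a single iovec covering the bounce buffer,
 * stashing the originals so the data can later be pulled from (or pushed to)
 * the original memory domain. */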
1397 static inline void
1398 accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
1399 			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
1400 			struct accel_buffer *buf)
1401 {
1402 	bounce->orig_iovs = *iovs;
1403 	bounce->orig_iovcnt = *iovcnt;
1404 	bounce->orig_domain = *domain;
1405 	bounce->orig_domain_ctx = *domain_ctx;
1406 	bounce->iov.iov_base = buf->buf;
1407 	bounce->iov.iov_len = buf->len;
1408 
1409 	*iovs = &bounce->iov;
1410 	*iovcnt = 1;
1411 	*domain = NULL;
1412 }
1413 
1414 static void
1415 accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1416 {
1417 	struct spdk_accel_task *task;
1418 	struct accel_buffer *accel_buf;
1419 
1420 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1421 	assert(accel_buf->buf == NULL);
1422 	accel_buf->buf = buf;
1423 
1424 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
1425 	assert(task != NULL);
1426 
1427 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1428 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1429 	accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
1430 				&task->src_domain_ctx, accel_buf);
1431 	accel_process_sequence(accel_buf->seq);
1432 }
1433 
1434 static void
1435 accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
1436 {
1437 	struct spdk_accel_task *task;
1438 	struct accel_buffer *accel_buf;
1439 
1440 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1441 	assert(accel_buf->buf == NULL);
1442 	accel_buf->buf = buf;
1443 
1444 	task = TAILQ_FIRST(&accel_buf->seq->tasks);
1445 	assert(task != NULL);
1446 
1447 	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1448 	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1449 	accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
1450 				&task->dst_domain_ctx, accel_buf);
1451 	accel_process_sequence(accel_buf->seq);
1452 }
1453 
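/* Allocate bounce buffers for a task whose data lives in a foreign memory
 * domain that the module executing the opcode cannot access directly.
 * Returns -EAGAIN if an iobuf buffer isn't immediately available; the iobuf
 * callback resumes the sequence once one is. */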
1454 static int
1455 accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1456 {
1457 	struct accel_buffer *buf;
1458 
1459 	if (task->src_domain != NULL) {
1460 		/* By the time we're here, accel buffers should have been allocated */
1461 		assert(task->src_domain != g_accel_domain);
1462 
1463 		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
1464 		if (buf == NULL) {
1465 			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
1466 			return -ENOMEM;
1467 		}
1468 
1469 		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
1470 		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
1471 			return -EAGAIN;
1472 		}
1473 
1474 		accel_set_bounce_buffer(&task->bounce.s, &task->s.iovs, &task->s.iovcnt,
1475 					&task->src_domain, &task->src_domain_ctx, buf);
1476 	}
1477 
1478 	if (task->dst_domain != NULL) {
1479 		/* By the time we're here, accel buffers should have been allocated */
1480 		assert(task->dst_domain != g_accel_domain);
1481 
1482 		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
1483 		if (buf == NULL) {
1484 			/* The src buffer will be released when the sequence is completed */
1485 			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
1486 			return -ENOMEM;
1487 		}
1488 
1489 		TAILQ_INSERT_TAIL(&seq->bounce_bufs, buf, link);
1490 		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
1491 			return -EAGAIN;
1492 		}
1493 
1494 		accel_set_bounce_buffer(&task->bounce.d, &task->d.iovs, &task->d.iovcnt,
1495 					&task->dst_domain, &task->dst_domain_ctx, buf);
1496 	}
1497 
1498 	return 0;
1499 }
1500 
1501 static void
1502 accel_task_pull_data_cb(void *ctx, int status)
1503 {
1504 	struct spdk_accel_sequence *seq = ctx;
1505 
1506 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1507 	if (spdk_likely(status == 0)) {
1508 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1509 	} else {
1510 		accel_sequence_set_fail(seq, status);
1511 	}
1512 
1513 	accel_process_sequence(seq);
1514 }
1515 
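/* Copy data from the task's original (foreign-domain) source buffers into the
 * local bounce buffer before the task is executed. */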
1516 static void
1517 accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1518 {
1519 	int rc;
1520 
1521 	assert(task->bounce.s.orig_iovs != NULL);
1522 	assert(task->bounce.s.orig_domain != NULL);
1523 	assert(task->bounce.s.orig_domain != g_accel_domain);
1524 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
1525 
1526 	rc = spdk_memory_domain_pull_data(task->bounce.s.orig_domain,
1527 					  task->bounce.s.orig_domain_ctx,
1528 					  task->bounce.s.orig_iovs, task->bounce.s.orig_iovcnt,
1529 					  task->s.iovs, task->s.iovcnt,
1530 					  accel_task_pull_data_cb, seq);
1531 	if (spdk_unlikely(rc != 0)) {
1532 		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
1533 			    spdk_memory_domain_get_dma_device_id(task->bounce.s.orig_domain), rc);
1534 		accel_sequence_set_fail(seq, rc);
1535 	}
1536 }
1537 
1538 static void
1539 accel_task_push_data_cb(void *ctx, int status)
1540 {
1541 	struct spdk_accel_sequence *seq = ctx;
1542 
1543 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
1544 	if (spdk_likely(status == 0)) {
1545 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
1546 	} else {
1547 		accel_sequence_set_fail(seq, status);
1548 	}
1549 
1550 	accel_process_sequence(seq);
1551 }
1552 
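/* Copy data from the local bounce buffer back to the task's original
 * (foreign-domain) destination buffers after the task has executed. */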
1553 static void
1554 accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1555 {
1556 	int rc;
1557 
1558 	assert(task->bounce.d.orig_iovs != NULL);
1559 	assert(task->bounce.d.orig_domain != NULL);
1560 	assert(task->bounce.d.orig_domain != g_accel_domain);
1561 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
1562 
1563 	rc = spdk_memory_domain_push_data(task->bounce.d.orig_domain,
1564 					  task->bounce.d.orig_domain_ctx,
1565 					  task->bounce.d.orig_iovs, task->bounce.d.orig_iovcnt,
1566 					  task->d.iovs, task->d.iovcnt,
1567 					  accel_task_push_data_cb, seq);
1568 	if (spdk_unlikely(rc != 0)) {
1569 		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
1570 			    spdk_memory_domain_get_dma_device_id(task->bounce.d.orig_domain), rc);
1571 		accel_sequence_set_fail(seq, rc);
1572 	}
1573 }
1574 
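/* Main loop of the sequence state machine.  Iterates until the state stops
 * changing, i.e. the sequence completes, fails, or parks in one of the
 * AWAIT_* states waiting for an asynchronous callback; in_process_sequence
 * guards against recursion when callbacks complete synchronously. */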
1575 static void
1576 accel_process_sequence(struct spdk_accel_sequence *seq)
1577 {
1578 	struct accel_io_channel *accel_ch = seq->ch;
1579 	struct spdk_accel_task *task;
1580 	enum accel_sequence_state state;
1581 	int rc;
1582 
1583 	/* Prevent recursive calls to this function */
1584 	if (spdk_unlikely(seq->in_process_sequence)) {
1585 		return;
1586 	}
1587 	seq->in_process_sequence = true;
1588 
1589 	task = TAILQ_FIRST(&seq->tasks);
1590 	assert(task != NULL);
1591 
1592 	do {
1593 		state = seq->state;
1594 		switch (state) {
1595 		case ACCEL_SEQUENCE_STATE_INIT:
1596 			if (g_accel_driver != NULL) {
1597 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC);
1598 				break;
1599 			}
1600 		/* Fall through */
1601 		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
1602 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
1603 			if (!accel_sequence_check_virtbuf(seq, task)) {
1604 				/* We couldn't allocate a buffer, wait until one is available */
1605 				break;
1606 			}
1607 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
1608 		/* Fall through */
1609 		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
1610 			/* If a module supports memory domains, we don't need to allocate bounce
1611 			 * buffers */
1612 			if (g_modules_opc[task->op_code].supports_memory_domains) {
1613 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1614 				break;
1615 			}
1616 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
1617 			rc = accel_sequence_check_bouncebuf(seq, task);
1618 			if (rc != 0) {
1619 				/* We couldn't allocate a buffer, wait until one is available */
1620 				if (rc == -EAGAIN) {
1621 					break;
1622 				}
1623 				accel_sequence_set_fail(seq, rc);
1624 				break;
1625 			}
1626 			if (task->bounce.s.orig_iovs != NULL) {
1627 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
1628 				break;
1629 			}
1630 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1631 		/* Fall through */
1632 		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
1633 			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
1634 				      g_opcode_strings[task->op_code], seq);
1635 
1636 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
1637 			rc = accel_submit_task(accel_ch, task);
1638 			if (spdk_unlikely(rc != 0)) {
1639 				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
1640 					    g_opcode_strings[task->op_code], seq);
1641 				accel_sequence_set_fail(seq, rc);
1642 			}
1643 			break;
1644 		case ACCEL_SEQUENCE_STATE_PULL_DATA:
1645 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1646 			accel_task_pull_data(seq, task);
1647 			break;
1648 		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
1649 			if (task->bounce.d.orig_iovs != NULL) {
1650 				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
1651 				break;
1652 			}
1653 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
1654 			break;
1655 		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
1656 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
1657 			accel_task_push_data(seq, task);
1658 			break;
1659 		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
1660 			TAILQ_REMOVE(&seq->tasks, task, seq_link);
1661 			TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
1662 			/* Check if there are any remaining tasks */
1663 			task = TAILQ_FIRST(&seq->tasks);
1664 			if (task == NULL) {
1665 				/* Immediately return here to make sure we don't touch the sequence
1666 				 * after it's completed */
1667 				accel_sequence_complete(seq);
1668 				return;
1669 			}
1670 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
1671 			break;
1672 		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC:
1673 			assert(!TAILQ_EMPTY(&seq->tasks));
1674 
1675 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK);
1676 			rc = g_accel_driver->execute_sequence(seq);
1677 			if (spdk_unlikely(rc != 0)) {
1678 				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
1679 					    seq, g_accel_driver->name);
1680 				accel_sequence_set_fail(seq, rc);
1681 			}
1682 			break;
1683 		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE:
1684 			task = TAILQ_FIRST(&seq->tasks);
1685 			if (task == NULL) {
1686 				/* Immediately return here to make sure we don't touch the sequence
1687 				 * after it's completed */
1688 				accel_sequence_complete(seq);
1689 				return;
1690 			}
1691 			/* We don't want to execute the next task through the driver, so we
1692 			 * explicitly omit the INIT state here */
1693 			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
1694 			break;
1695 		case ACCEL_SEQUENCE_STATE_ERROR:
1696 			/* Immediately return here to make sure we don't touch the sequence
1697 			 * after it's completed */
1698 			assert(seq->status != 0);
1699 			accel_sequence_complete(seq);
1700 			return;
1701 		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
1702 		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
1703 		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
1704 		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
1705 		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
1706 		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK:
1707 			break;
1708 		default:
1709 			assert(0 && "bad state");
1710 			break;
1711 		}
1712 	} while (seq->state != state);
1713 
1714 	seq->in_process_sequence = false;
1715 }
1716 
1717 static void
1718 accel_sequence_task_cb(void *cb_arg, int status)
1719 {
1720 	struct spdk_accel_sequence *seq = cb_arg;
1721 	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);
1722 	struct accel_io_channel *accel_ch = seq->ch;
1723 
1724 	/* spdk_accel_task_complete() returns the task to the task pool, but we don't want that
1725 	 * to happen when the task is part of a sequence.  Removing the task from the pool here
1726 	 * is the easiest way to prevent it, even though it is a bit hacky.
1727 	 */
1728 	assert(task != NULL);
1729 	TAILQ_REMOVE(&accel_ch->task_pool, task, link);
1730 
1731 	switch (seq->state) {
1732 	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
1733 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
1734 		if (spdk_unlikely(status != 0)) {
1735 			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
1736 				    g_opcode_strings[task->op_code], seq);
1737 			accel_sequence_set_fail(seq, status);
1738 		}
1739 
1740 		accel_process_sequence(seq);
1741 		break;
1742 	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK:
1743 		assert(g_accel_driver != NULL);
1744 		/* Immediately remove the task from the outstanding list to make sure the next call
1745 		 * to spdk_accel_sequence_first_task() doesn't return it */
1746 		TAILQ_REMOVE(&seq->tasks, task, seq_link);
1747 		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
1748 
1749 		if (spdk_unlikely(status != 0)) {
1750 			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
1751 				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
1752 				    g_accel_driver->name);
1753 		/* Update the status without using accel_sequence_set_fail() to avoid changing
1754 		 * seq's state to ERROR until the driver calls spdk_accel_sequence_continue() */
1755 			seq->status = status;
1756 		}
1757 		break;
1758 	default:
1759 		assert(0 && "bad state");
1760 		break;
1761 	}
1762 }
1763 
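/*
 * Called by an accel driver to hand control of a sequence back to the framework after the
 * driver has finished executing (part of) it.  A minimal sketch of a driver's completion
 * path, assuming a hypothetical per-task callback named my_driver_task_done():
 *
 *	static void
 *	my_driver_task_done(void *cb_arg, int status)
 *	{
 *		struct spdk_accel_sequence *seq = cb_arg;
 *
 *		spdk_accel_task_complete(spdk_accel_sequence_first_task(seq), status);
 *		spdk_accel_sequence_continue(seq);
 *	}
 */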
1764 void
1765 spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
1766 {
1767 	assert(g_accel_driver != NULL);
1768 	assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASK);
1769 
1770 	if (spdk_likely(seq->status == 0)) {
1771 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE);
1772 	} else {
1773 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
1774 	}
1775 
1776 	accel_process_sequence(seq);
1777 }
1778 
1779 static bool
1780 accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
1781 {
1782 	/* For now, just do a naive check that the iovec arrays are exactly the same */
1783 	if (iovacnt != iovbcnt) {
1784 		return false;
1785 	}
1786 
1787 	return memcmp(iova, iovb, sizeof(*iova) * iovacnt) == 0;
1788 }
1789 
1790 static bool
1791 accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next)
1792 {
1793 	struct spdk_accel_task *prev;
1794 
1795 	switch (task->op_code) {
1796 	case ACCEL_OPC_DECOMPRESS:
1797 	case ACCEL_OPC_FILL:
1798 	case ACCEL_OPC_ENCRYPT:
1799 	case ACCEL_OPC_DECRYPT:
1800 		if (task->dst_domain != next->src_domain) {
1801 			return false;
1802 		}
1803 		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
1804 					next->s.iovs, next->s.iovcnt)) {
1805 			return false;
1806 		}
1807 		task->d.iovs = next->d.iovs;
1808 		task->d.iovcnt = next->d.iovcnt;
1809 		task->dst_domain = next->dst_domain;
1810 		task->dst_domain_ctx = next->dst_domain_ctx;
1811 		break;
1812 	case ACCEL_OPC_CRC32C:
1813 		/* crc32 is special because it doesn't have a dst buffer */
1814 		if (task->src_domain != next->src_domain) {
1815 			return false;
1816 		}
1817 		if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt,
1818 					next->s.iovs, next->s.iovcnt)) {
1819 			return false;
1820 		}
1821 		/* We can only change crc32's buffer if we can change the previous task's buffer */
1822 		prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link);
1823 		if (prev == NULL) {
1824 			return false;
1825 		}
1826 		if (!accel_task_set_dstbuf(prev, next)) {
1827 			return false;
1828 		}
1829 		task->s.iovs = next->d.iovs;
1830 		task->s.iovcnt = next->d.iovcnt;
1831 		task->src_domain = next->dst_domain;
1832 		task->src_domain_ctx = next->dst_domain_ctx;
1833 		break;
1834 	default:
1835 		return false;
1836 	}
1837 
1838 	return true;
1839 }
1840 
1841 static void
1842 accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
1843 			   struct spdk_accel_task **next_task)
1844 {
1845 	struct spdk_accel_task *next = *next_task;
1846 
1847 	switch (task->op_code) {
1848 	case ACCEL_OPC_COPY:
1849 		/* We only allow changing the src of operations that actually have a src, e.g. we
1850 		 * never do it for fill.  Theoretically it is possible, but we'd have to be careful
1851 		 * to change the src of the operation following the fill (which in turn could also
1852 		 * be a fill).  So, for the sake of simplicity, skip these operations for now.
1853 		 */
1854 		if (next->op_code != ACCEL_OPC_DECOMPRESS &&
1855 		    next->op_code != ACCEL_OPC_COPY &&
1856 		    next->op_code != ACCEL_OPC_ENCRYPT &&
1857 		    next->op_code != ACCEL_OPC_DECRYPT &&
1858 		    next->op_code != ACCEL_OPC_CRC32C) {
1859 			break;
1860 		}
1861 		if (task->dst_domain != next->src_domain) {
1862 			break;
1863 		}
1864 		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
1865 					next->s.iovs, next->s.iovcnt)) {
1866 			break;
1867 		}
1868 		next->s.iovs = task->s.iovs;
1869 		next->s.iovcnt = task->s.iovcnt;
1870 		next->src_domain = task->src_domain;
1871 		next->src_domain_ctx = task->src_domain_ctx;
1872 		TAILQ_REMOVE(&seq->tasks, task, seq_link);
1873 		TAILQ_INSERT_TAIL(&seq->completed, task, seq_link);
1874 		break;
1875 	case ACCEL_OPC_DECOMPRESS:
1876 	case ACCEL_OPC_FILL:
1877 	case ACCEL_OPC_ENCRYPT:
1878 	case ACCEL_OPC_DECRYPT:
1879 	case ACCEL_OPC_CRC32C:
1880 		/* We can only merge tasks when one of them is a copy */
1881 		if (next->op_code != ACCEL_OPC_COPY) {
1882 			break;
1883 		}
1884 		if (!accel_task_set_dstbuf(task, next)) {
1885 			break;
1886 		}
1887 		/* We're removing next_task from the tasks queue, so update the caller's pointer to
1888 		 * keep its TAILQ_FOREACH_SAFE() loop working correctly */
1889 		*next_task = TAILQ_NEXT(next, seq_link);
1890 		TAILQ_REMOVE(&seq->tasks, next, seq_link);
1891 		TAILQ_INSERT_TAIL(&seq->completed, next, seq_link);
1892 		break;
1893 	default:
1894 		assert(0 && "bad opcode");
1895 		break;
1896 	}
1897 }
1898 
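/*
 * Finalizes a sequence: adjacent tasks are merged first (eliding copies where possible)
 * and then execution of the remaining tasks is started.  A hedged usage sketch, assuming
 * the append APIs declared in spdk/accel.h and caller-provided ch/buf/len/cb values:
 *
 *	struct spdk_accel_sequence *seq = NULL;
 *	int rc;
 *
 *	rc = spdk_accel_append_fill(&seq, ch, buf, len, NULL, NULL, 0xa5, NULL, NULL);
 *	if (rc == 0) {
 *		spdk_accel_sequence_finish(seq, cb_fn, cb_arg);
 *	}
 */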
1899 void
1900 spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
1901 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
1902 {
1903 	struct spdk_accel_task *task, *next;
1904 
1905 	/* Try to merge adjacent tasks, eliminating copy operations where possible */
1906 	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
1907 		if (next == NULL) {
1908 			break;
1909 		}
1910 		accel_sequence_merge_tasks(seq, task, &next);
1911 	}
1912 
1913 	seq->cb_fn = cb_fn;
1914 	seq->cb_arg = cb_arg;
1915 
1916 	accel_process_sequence(seq);
1917 }
1918 
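/*
 * Reverses the order in which the tasks of a sequence will be executed: the task appended
 * last becomes the first to run.  Useful when tasks were appended in the reverse of their
 * intended execution order.
 */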
1919 void
1920 spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
1921 {
1922 	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
1923 	struct spdk_accel_task *task;
1924 
1925 	assert(TAILQ_EMPTY(&seq->completed));
1926 	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);
1927 
1928 	while (!TAILQ_EMPTY(&tasks)) {
1929 		task = TAILQ_FIRST(&tasks);
1930 		TAILQ_REMOVE(&tasks, task, seq_link);
1931 		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
1932 	}
1933 }
1934 
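/*
 * Aborts a sequence that hasn't been finished yet: any remaining tasks are completed and
 * the sequence and its resources are returned to their pools.  Calling this with a NULL
 * sequence is a no-op.
 */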
1935 void
1936 spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
1937 {
1938 	if (seq == NULL) {
1939 		return;
1940 	}
1941 
1942 	accel_sequence_complete_tasks(seq);
1943 	accel_sequence_put(seq);
1944 }
1945 
1946 struct spdk_memory_domain *
1947 spdk_accel_get_memory_domain(void)
1948 {
1949 	return g_accel_domain;
1950 }
1951 
1952 static struct spdk_accel_module_if *
1953 _module_find_by_name(const char *name)
1954 {
1955 	struct spdk_accel_module_if *accel_module = NULL;
1956 
1957 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
1958 		if (strcmp(name, accel_module->name) == 0) {
1959 			break;
1960 		}
1961 	}
1962 
1963 	return accel_module;
1964 }
1965 
1966 static inline struct spdk_accel_crypto_key *
1967 _accel_crypto_key_get(const char *name)
1968 {
1969 	struct spdk_accel_crypto_key *key;
1970 
1971 	assert(spdk_spin_held(&g_keyring_spin));
1972 
1973 	TAILQ_FOREACH(key, &g_keyring, link) {
1974 		if (strcmp(name, key->param.key_name) == 0) {
1975 			return key;
1976 		}
1977 	}
1978 
1979 	return NULL;
1980 }
1981 
1982 static void
1983 accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
1984 {
1985 	if (key->param.hex_key) {
1986 		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
1987 		free(key->param.hex_key);
1988 	}
1989 	if (key->param.hex_key2) {
1990 		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
1991 		free(key->param.hex_key2);
1992 	}
1993 	free(key->param.tweak_mode);
1994 	free(key->param.key_name);
1995 	free(key->param.cipher);
1996 	if (key->key) {
1997 		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
1998 		free(key->key);
1999 	}
2000 	if (key->key2) {
2001 		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
2002 		free(key->key2);
2003 	}
2004 	free(key);
2005 }
2006 
2007 static void
2008 accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
2009 {
2010 	assert(key->module_if);
2011 	assert(key->module_if->crypto_key_deinit);
2012 
2013 	key->module_if->crypto_key_deinit(key);
2014 	accel_crypto_key_free_mem(key);
2015 }
2016 
2017 /*
2018  * This function mitigates the timing side channel that would be introduced by using strcmp().
2019  * Please refer to the chapter "Mitigating Information Leakage Based on Variable Timing" in
2020  * the article [1] for more details.
2021  * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
2022  */
2023 static bool
2024 accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
2025 {
2026 	size_t i;
2027 	volatile size_t x = k1_len ^ k2_len;
2028 
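	/* Note: the bitwise '&' in the loop condition (rather than '&&') and the volatile
	 * accumulator are deliberate: they keep the comparison free of data-dependent
	 * branches, so its runtime doesn't reveal where the keys first differ.
	 */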
2029 	for (i = 0; ((i < k1_len) & (i < k2_len)); i++) {
2030 		x |= k1[i] ^ k2[i];
2031 	}
2032 
2033 	return x == 0;
2034 }
2035 
2036 static const char *g_tweak_modes[SPDK_ACCEL_CRYPTO_TWEAK_MODE_MAX] = {
2037 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA] = "SIMPLE_LBA",
2038 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_JOIN_NEG_LBA_WITH_LBA] = "JOIN_NEG_LBA_WITH_LBA",
2039 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_FULL_LBA] = "INCR_512_FULL_LBA",
2040 	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_UPPER_LBA] = "INCR_512_UPPER_LBA",
2041 };
2042 
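/*
 * Creates a crypto key and registers it in the global keyring.  A hedged usage sketch;
 * the hex strings are illustrative placeholders and the cipher/tweak_mode strings are
 * assumed to be values the selected module accepts:
 *
 *	struct spdk_accel_crypto_key_create_param param = {
 *		.key_name = "key0",
 *		.cipher = "AES_XTS",
 *		.hex_key = "...",		(hex-encoded key 1)
 *		.hex_key2 = "...",		(hex-encoded key 2; must differ from key 1)
 *		.tweak_mode = "SIMPLE_LBA",	(optional; defaults to SIMPLE_LBA)
 *	};
 *
 *	int rc = spdk_accel_crypto_key_create(&param);
 */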
2043 int
2044 spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
2045 {
2046 	struct spdk_accel_module_if *module;
2047 	struct spdk_accel_crypto_key *key;
2048 	size_t hex_key_size, hex_key2_size;
2049 	int rc;
2050 
2051 	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
2052 		return -EINVAL;
2053 	}
2054 
2055 	if (g_modules_opc[ACCEL_OPC_ENCRYPT].module != g_modules_opc[ACCEL_OPC_DECRYPT].module) {
2056 		/* This should be practically impossible, but check and warn the user anyway */
2057 		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
2058 	}
2059 	module = g_modules_opc[ACCEL_OPC_ENCRYPT].module;
2060 
2061 	if (!module) {
2062 		SPDK_ERRLOG("No accel module found assigned for crypto operation\n");
2063 		return -ENOENT;
2064 	}
2065 	if (!module->crypto_key_init) {
2066 		SPDK_ERRLOG("Accel module \"%s\" doesn't support crypto operations\n", module->name);
2067 		return -ENOTSUP;
2068 	}
2069 
2070 	key = calloc(1, sizeof(*key));
2071 	if (!key) {
2072 		return -ENOMEM;
2073 	}
2074 
2075 	key->param.key_name = strdup(param->key_name);
2076 	if (!key->param.key_name) {
2077 		rc = -ENOMEM;
2078 		goto error;
2079 	}
2080 
2081 	key->param.cipher = strdup(param->cipher);
2082 	if (!key->param.cipher) {
2083 		rc = -ENOMEM;
2084 		goto error;
2085 	}
2086 
2087 	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2088 	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
2089 		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2090 		rc = -EINVAL;
2091 		goto error;
2092 	}
2093 	key->param.hex_key = strdup(param->hex_key);
2094 	if (!key->param.hex_key) {
2095 		rc = -ENOMEM;
2096 		goto error;
2097 	}
2098 
2099 	key->key_size = hex_key_size / 2;
2100 	key->key = spdk_unhexlify(key->param.hex_key);
2101 	if (!key->key) {
2102 		SPDK_ERRLOG("Failed to unhexlify key1\n");
2103 		rc = -EINVAL;
2104 		goto error;
2105 	}
2106 
2107 	if (param->hex_key2) {
2108 		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2109 		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
2110 			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
2111 			rc = -EINVAL;
2112 			goto error;
2113 		}
2114 		key->param.hex_key2 = strdup(param->hex_key2);
2115 		if (!key->param.hex_key2) {
2116 			rc = -ENOMEM;
2117 			goto error;
2118 		}
2119 
2120 		key->key2_size = hex_key2_size / 2;
2121 		key->key2 = spdk_unhexlify(key->param.hex_key2);
2122 		if (!key->key2) {
2123 			SPDK_ERRLOG("Failed to unhexlify key2\n");
2124 			rc = -EINVAL;
2125 			goto error;
2126 		}
2127 
2128 		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
2129 			SPDK_ERRLOG("Identical keys are not secure\n");
2130 			rc = -EINVAL;
2131 			goto error;
2132 		}
2133 	}
2134 
2136 	key->tweak_mode = ACCEL_CRYPTO_TWEAK_MODE_DEFAULT;
2137 	if (param->tweak_mode) {
2138 		bool found = false;
2139 
2140 		key->param.tweak_mode = strdup(param->tweak_mode);
2141 		if (!key->param.tweak_mode) {
2142 			rc = -ENOMEM;
2143 			goto error;
2144 		}
2145 
2146 		for (uint32_t i = 0; i < SPDK_COUNTOF(g_tweak_modes); ++i) {
2147 			assert(strlen(g_tweak_modes[i]) < ACCEL_CRYPTO_TWEAK_MODE_CHAR_MAX);
2148 
2149 			if (strncmp(param->tweak_mode, g_tweak_modes[i], ACCEL_CRYPTO_TWEAK_MODE_CHAR_MAX) == 0) {
2150 				key->tweak_mode = i;
2151 				found = true;
2152 				break;
2153 			}
2154 		}
2155 
2156 		if (!found) {
2157 			SPDK_ERRLOG("Failed to parse tweak mode\n");
2158 			rc = -EINVAL;
2159 			goto error;
2160 		}
2161 	}
2162 
2163 	if ((!module->crypto_supports_tweak_mode && key->tweak_mode != ACCEL_CRYPTO_TWEAK_MODE_DEFAULT) ||
2164 	    (module->crypto_supports_tweak_mode && !module->crypto_supports_tweak_mode(key->tweak_mode))) {
2165 		SPDK_ERRLOG("Module %s doesn't support %s tweak mode\n", module->name,
2166 			    g_tweak_modes[key->tweak_mode]);
2167 		rc = -EINVAL;
2168 		goto error;
2169 	}
2170 
2171 	key->module_if = module;
2172 
2173 	spdk_spin_lock(&g_keyring_spin);
2174 	if (_accel_crypto_key_get(param->key_name)) {
2175 		rc = -EEXIST;
2176 	} else {
2177 		rc = module->crypto_key_init(key);
2178 		if (!rc) {
2179 			TAILQ_INSERT_TAIL(&g_keyring, key, link);
2180 		}
2181 	}
2182 	spdk_spin_unlock(&g_keyring_spin);
2183 
2184 	if (rc) {
2185 		goto error;
2186 	}
2187 
2188 	return 0;
2189 
2190 error:
2191 	accel_crypto_key_free_mem(key);
2192 	return rc;
2193 }
2194 
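/*
 * Removes a key from the keyring and releases it through the module that created it.
 * Typical teardown, assuming a key named "key0" was created earlier:
 *
 *	struct spdk_accel_crypto_key *key = spdk_accel_crypto_key_get("key0");
 *
 *	if (key != NULL) {
 *		spdk_accel_crypto_key_destroy(key);
 *	}
 */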
2195 int
2196 spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
2197 {
2198 	if (!key || !key->module_if) {
2199 		return -EINVAL;
2200 	}
2201 
2202 	spdk_spin_lock(&g_keyring_spin);
2203 	if (!_accel_crypto_key_get(key->param.key_name)) {
2204 		spdk_spin_unlock(&g_keyring_spin);
2205 		return -ENOENT;
2206 	}
2207 	TAILQ_REMOVE(&g_keyring, key, link);
2208 	spdk_spin_unlock(&g_keyring_spin);
2209 
2210 	accel_crypto_key_destroy_unsafe(key);
2211 
2212 	return 0;
2213 }
2214 
2215 struct spdk_accel_crypto_key *
2216 spdk_accel_crypto_key_get(const char *name)
2217 {
2218 	struct spdk_accel_crypto_key *key;
2219 
2220 	spdk_spin_lock(&g_keyring_spin);
2221 	key = _accel_crypto_key_get(name);
2222 	spdk_spin_unlock(&g_keyring_spin);
2223 
2224 	return key;
2225 }
2226 
2227 /* Helper function when accel modules register with the framework. */
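/*
 * Modules normally don't call this directly; they register through the
 * SPDK_ACCEL_MODULE_REGISTER() macro from spdk/accel_module.h, which invokes this
 * function from a constructor, along the lines of:
 *
 *	SPDK_ACCEL_MODULE_REGISTER(my_module, &g_my_module_if)
 */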
2228 void
2229 spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
2230 {
2231 	if (_module_find_by_name(accel_module->name)) {
2232 		SPDK_NOTICELOG("Accel module %s already registered\n", accel_module->name);
2233 		assert(false);
2234 		return;
2235 	}
2236 
2237 	/* Make sure that the software module is at the head of the list; this
2238 	 * ensures that all opcodes are first assigned to software and then
2239 	 * updated to HW modules as they are registered.
2240 	 */
2241 	if (strcmp(accel_module->name, "software") == 0) {
2242 		TAILQ_INSERT_HEAD(&spdk_accel_module_list, accel_module, tailq);
2243 	} else {
2244 		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
2245 	}
2246 
2247 	if (accel_module->get_ctx_size && accel_module->get_ctx_size() > g_max_accel_module_size) {
2248 		g_max_accel_module_size = accel_module->get_ctx_size();
2249 	}
2250 }
2251 
2252 /* Framework level channel create callback. */
2253 static int
2254 accel_create_channel(void *io_device, void *ctx_buf)
2255 {
2256 	struct accel_io_channel	*accel_ch = ctx_buf;
2257 	struct spdk_accel_task *accel_task;
2258 	struct spdk_accel_sequence *seq;
2259 	struct accel_buffer *buf;
2260 	uint8_t *task_mem;
2261 	uint32_t i = 0, j;
2262 	int rc;
2263 
2264 	accel_ch->task_pool_base = calloc(g_opts.task_count, g_max_accel_module_size);
2265 	if (accel_ch->task_pool_base == NULL) {
2266 		return -ENOMEM;
2267 	}
2268 
2269 	accel_ch->seq_pool_base = calloc(g_opts.sequence_count, sizeof(struct spdk_accel_sequence));
2270 	if (accel_ch->seq_pool_base == NULL) {
2271 		goto err;
2272 	}
2273 
2274 	accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer));
2275 	if (accel_ch->buf_pool_base == NULL) {
2276 		goto err;
2277 	}
2278 
2279 	TAILQ_INIT(&accel_ch->task_pool);
2280 	TAILQ_INIT(&accel_ch->seq_pool);
2281 	TAILQ_INIT(&accel_ch->buf_pool);
2282 
2283 	task_mem = accel_ch->task_pool_base;
2284 	for (i = 0; i < g_opts.task_count; i++) {
2285 		accel_task = (struct spdk_accel_task *)task_mem;
2286 		TAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
2287 		task_mem += g_max_accel_module_size;
2288 	}
2289 	for (i = 0; i < g_opts.sequence_count; i++) {
2290 		seq = &accel_ch->seq_pool_base[i];
2291 		TAILQ_INSERT_TAIL(&accel_ch->seq_pool, seq, link);
2292 	}
2293 	for (i = 0; i < g_opts.buf_count; i++) {
2294 		buf = &accel_ch->buf_pool_base[i];
2295 		TAILQ_INSERT_TAIL(&accel_ch->buf_pool, buf, link);
2296 	}
2297 
2298 	/* Assign modules and get IO channels for each */
2299 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
2300 		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
2301 		/* This can happen if idxd runs out of channels. */
2302 		if (accel_ch->module_ch[i] == NULL) {
2303 			goto err;
2304 		}
2305 	}
2306 
2307 	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size,
2308 				     g_opts.large_cache_size);
2309 	if (rc != 0) {
2310 		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
2311 		goto err;
2312 	}
2313 
2314 	return 0;
2315 err:
2316 	for (j = 0; j < i; j++) {
2317 		spdk_put_io_channel(accel_ch->module_ch[j]);
2318 	}
2319 	free(accel_ch->task_pool_base);
2320 	free(accel_ch->seq_pool_base);
2321 	free(accel_ch->buf_pool_base);
2322 
2323 	return -ENOMEM;
2324 }
2325 
2326 static void
2327 accel_add_stats(struct accel_stats *total, struct accel_stats *stats)
2328 {
2329 	int i;
2330 
2331 	total->sequence_executed += stats->sequence_executed;
2332 	total->sequence_failed += stats->sequence_failed;
2333 	for (i = 0; i < ACCEL_OPC_LAST; ++i) {
2334 		total->operations[i].executed += stats->operations[i].executed;
2335 		total->operations[i].failed += stats->operations[i].failed;
2336 		total->operations[i].num_bytes += stats->operations[i].num_bytes;
2337 	}
2338 }
2339 
2340 /* Framework level channel destroy callback. */
2341 static void
2342 accel_destroy_channel(void *io_device, void *ctx_buf)
2343 {
2344 	struct accel_io_channel	*accel_ch = ctx_buf;
2345 	int i;
2346 
2347 	spdk_iobuf_channel_fini(&accel_ch->iobuf);
2348 
2349 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
2350 		assert(accel_ch->module_ch[i] != NULL);
2351 		spdk_put_io_channel(accel_ch->module_ch[i]);
2352 		accel_ch->module_ch[i] = NULL;
2353 	}
2354 
2355 	/* Update the global stats to make sure the channel's stats aren't lost after the channel is gone */
2356 	spdk_spin_lock(&g_stats_lock);
2357 	accel_add_stats(&g_stats, &accel_ch->stats);
2358 	spdk_spin_unlock(&g_stats_lock);
2359 
2360 	free(accel_ch->task_pool_base);
2361 	free(accel_ch->seq_pool_base);
2362 	free(accel_ch->buf_pool_base);
2363 }
2364 
2365 struct spdk_io_channel *
2366 spdk_accel_get_io_channel(void)
2367 {
2368 	return spdk_get_io_channel(&spdk_accel_module_list);
2369 }
2370 
2371 static void
2372 accel_module_initialize(void)
2373 {
2374 	struct spdk_accel_module_if *accel_module;
2375 
2376 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2377 		accel_module->module_init();
2378 	}
2379 }
2380 
2381 static void
2382 accel_module_init_opcode(enum accel_opcode opcode)
2383 {
2384 	struct accel_module *module = &g_modules_opc[opcode];
2385 	struct spdk_accel_module_if *module_if = module->module;
2386 
2387 	if (module_if->get_memory_domains != NULL) {
2388 		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
2389 	}
2390 }
2391 
2392 int
2393 spdk_accel_initialize(void)
2394 {
2395 	enum accel_opcode op;
2396 	struct spdk_accel_module_if *accel_module = NULL;
2397 	int rc;
2398 
2399 	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
2400 				       "SPDK_ACCEL_DMA_DEVICE");
2401 	if (rc != 0) {
2402 		SPDK_ERRLOG("Failed to create accel memory domain\n");
2403 		return rc;
2404 	}
2405 
2406 	spdk_spin_init(&g_keyring_spin);
2407 	spdk_spin_init(&g_stats_lock);
2408 
2409 	g_modules_started = true;
2410 	accel_module_initialize();
2411 
2412 	/* Create our global priority map of opcodes to modules.  We start by assigning
2413 	 * every opcode to the software module (guaranteed to be first on the list) and
2414 	 * then update opcodes with HW modules that have been initialized.
2415 	 * NOTE: the software module must support all opcodes in the event that no HW
2416 	 * module is initialized to support an operation.
2417 	 */
2418 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2419 		for (op = 0; op < ACCEL_OPC_LAST; op++) {
2420 			if (accel_module->supports_opcode(op)) {
2421 				g_modules_opc[op].module = accel_module;
2422 				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
2423 			}
2424 		}
2425 	}
2426 
2427 	/* Now let's check for overrides and apply any that exist */
2428 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
2429 		if (g_modules_opc_override[op] != NULL) {
2430 			accel_module = _module_find_by_name(g_modules_opc_override[op]);
2431 			if (accel_module == NULL) {
2432 				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
2433 				rc = -EINVAL;
2434 				goto error;
2435 			}
2436 			if (accel_module->supports_opcode(op) == false) {
2437 				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
2438 				rc = -EINVAL;
2439 				goto error;
2440 			}
2441 			g_modules_opc[op].module = accel_module;
2442 		}
2443 	}
2444 
2445 	if (g_modules_opc[ACCEL_OPC_ENCRYPT].module != g_modules_opc[ACCEL_OPC_DECRYPT].module) {
2446 		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations");
2447 		rc = -EINVAL;
2448 		goto error;
2449 	}
2450 
2451 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
2452 		assert(g_modules_opc[op].module != NULL);
2453 		accel_module_init_opcode(op);
2454 	}
2455 
2456 	rc = spdk_iobuf_register_module("accel");
2457 	if (rc != 0) {
2458 		SPDK_ERRLOG("Failed to register accel iobuf module\n");
2459 		goto error;
2460 	}
2461 
2462 	/*
2463 	 * We need a unique identifier for the accel framework, so use the
2464 	 * spdk_accel_module_list address for this purpose.
2465 	 */
2466 	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
2467 				sizeof(struct accel_io_channel), "accel");
2468 
2469 	return 0;
2470 error:
2471 	spdk_memory_domain_destroy(g_accel_domain);
2472 
2473 	return rc;
2474 }
2475 
2476 static void
2477 accel_module_finish_cb(void)
2478 {
2479 	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;
2480 
2481 	spdk_memory_domain_destroy(g_accel_domain);
2482 
2483 	cb_fn(g_fini_cb_arg);
2484 	g_fini_cb_fn = NULL;
2485 	g_fini_cb_arg = NULL;
2486 }
2487 
2488 static void
2489 accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
2490 			   const char *module_str)
2491 {
2492 	spdk_json_write_object_begin(w);
2493 	spdk_json_write_named_string(w, "method", "accel_assign_opc");
2494 	spdk_json_write_named_object_begin(w, "params");
2495 	spdk_json_write_named_string(w, "opname", opc_str);
2496 	spdk_json_write_named_string(w, "module", module_str);
2497 	spdk_json_write_object_end(w);
2498 	spdk_json_write_object_end(w);
2499 }
2500 
2501 static void
2502 __accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
2503 {
2504 	spdk_json_write_named_string(w, "name", key->param.key_name);
2505 	spdk_json_write_named_string(w, "cipher", key->param.cipher);
2506 	spdk_json_write_named_string(w, "key", key->param.hex_key);
2507 	if (key->param.hex_key2) {
2508 		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
2509 	}
2510 
2511 	if (key->param.tweak_mode) {
2512 		spdk_json_write_named_string(w, "tweak_mode", key->param.tweak_mode);
2513 	}
2514 }
2515 
2516 void
2517 _accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
2518 {
2519 	spdk_json_write_object_begin(w);
2520 	__accel_crypto_key_dump_param(w, key);
2521 	spdk_json_write_object_end(w);
2522 }
2523 
2524 static void
2525 _accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
2526 				    struct spdk_accel_crypto_key *key)
2527 {
2528 	spdk_json_write_object_begin(w);
2529 	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
2530 	spdk_json_write_named_object_begin(w, "params");
2531 	__accel_crypto_key_dump_param(w, key);
2532 	spdk_json_write_object_end(w);
2533 	spdk_json_write_object_end(w);
2534 }
2535 
2536 static void
2537 accel_write_options(struct spdk_json_write_ctx *w)
2538 {
2539 	spdk_json_write_object_begin(w);
2540 	spdk_json_write_named_string(w, "method", "accel_set_options");
2541 	spdk_json_write_named_object_begin(w, "params");
2542 	spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size);
2543 	spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size);
2544 	spdk_json_write_named_uint32(w, "task_count", g_opts.task_count);
2545 	spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count);
2546 	spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count);
2547 	spdk_json_write_object_end(w);
2548 	spdk_json_write_object_end(w);
2549 }
2550 
2551 static void
2552 _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
2553 {
2554 	struct spdk_accel_crypto_key *key;
2555 
2556 	spdk_spin_lock(&g_keyring_spin);
2557 	TAILQ_FOREACH(key, &g_keyring, link) {
2558 		if (full_dump) {
2559 			_accel_crypto_key_write_config_json(w, key);
2560 		} else {
2561 			_accel_crypto_key_dump_param(w, key);
2562 		}
2563 	}
2564 	spdk_spin_unlock(&g_keyring_spin);
2565 }
2566 
2567 void
2568 _accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
2569 {
2570 	_accel_crypto_keys_write_config_json(w, false);
2571 }
2572 
2573 void
2574 spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
2575 {
2576 	struct spdk_accel_module_if *accel_module;
2577 	int i;
2578 
2579 	spdk_json_write_array_begin(w);
2580 	accel_write_options(w);
2581 
2582 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2583 		if (accel_module->write_config_json) {
2584 			accel_module->write_config_json(w);
2585 		}
2586 	}
2587 	for (i = 0; i < ACCEL_OPC_LAST; i++) {
2588 		if (g_modules_opc_override[i]) {
2589 			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
2590 		}
2591 	}
2592 
2593 	_accel_crypto_keys_write_config_json(w, true);
2594 
2595 	spdk_json_write_array_end(w);
2596 }
2597 
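/*
 * Tears the modules down one at a time: each module's module_fini() is expected to call
 * spdk_accel_module_finish() again when it is done, which advances the iteration to the
 * next module.  Once the list is exhausted, the framework-level cleanup runs and the
 * fini callback registered via spdk_accel_finish() is invoked.
 */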
2598 void
2599 spdk_accel_module_finish(void)
2600 {
2601 	if (!g_accel_module) {
2602 		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
2603 	} else {
2604 		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
2605 	}
2606 
2607 	if (!g_accel_module) {
2608 		spdk_spin_destroy(&g_keyring_spin);
2609 		spdk_spin_destroy(&g_stats_lock);
2610 		accel_module_finish_cb();
2611 		return;
2612 	}
2613 
2614 	if (g_accel_module->module_fini) {
2615 		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
2616 	} else {
2617 		spdk_accel_module_finish();
2618 	}
2619 }
2620 
2621 static void
2622 accel_io_device_unregister_cb(void *io_device)
2623 {
2624 	struct spdk_accel_crypto_key *key, *key_tmp;
2625 	enum accel_opcode op;
2626 
2627 	spdk_spin_lock(&g_keyring_spin);
2628 	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
2629 		accel_crypto_key_destroy_unsafe(key);
2630 	}
2631 	spdk_spin_unlock(&g_keyring_spin);
2632 
2633 	for (op = 0; op < ACCEL_OPC_LAST; op++) {
2634 		if (g_modules_opc_override[op] != NULL) {
2635 			free(g_modules_opc_override[op]);
2636 			g_modules_opc_override[op] = NULL;
2637 		}
2638 		g_modules_opc[op].module = NULL;
2639 	}
2640 
2641 	spdk_accel_module_finish();
2642 }
2643 
2644 void
2645 spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
2646 {
2647 	assert(cb_fn != NULL);
2648 
2649 	g_fini_cb_fn = cb_fn;
2650 	g_fini_cb_arg = cb_arg;
2651 
2652 	spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb);
2653 }
2654 
2655 static struct spdk_accel_driver *
2656 accel_find_driver(const char *name)
2657 {
2658 	struct spdk_accel_driver *driver;
2659 
2660 	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
2661 		if (strcmp(driver->name, name) == 0) {
2662 			return driver;
2663 		}
2664 	}
2665 
2666 	return NULL;
2667 }
2668 
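/*
 * Selects the platform driver used to execute operation sequences.  The driver must have
 * been registered via spdk_accel_driver_register() first.  Illustrative call, using a
 * hypothetical driver name:
 *
 *	rc = spdk_accel_set_driver("mydriver");
 */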
2669 int
2670 spdk_accel_set_driver(const char *name)
2671 {
2672 	struct spdk_accel_driver *driver;
2673 
2674 	driver = accel_find_driver(name);
2675 	if (driver == NULL) {
2676 		SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
2677 		return -ENODEV;
2678 	}
2679 
2680 	g_accel_driver = driver;
2681 
2682 	return 0;
2683 }
2684 
2685 void
2686 spdk_accel_driver_register(struct spdk_accel_driver *driver)
2687 {
2688 	if (accel_find_driver(driver->name)) {
2689 		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
2690 		assert(0);
2691 		return;
2692 	}
2693 
2694 	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
2695 }
2696 
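/*
 * Overrides the framework-wide defaults.  The size field enables ABI compatibility: only
 * the first opts->size bytes are copied.  A hedged read-modify-write sketch, typically
 * done before spdk_accel_initialize():
 *
 *	struct spdk_accel_opts opts = { .size = sizeof(opts) };
 *
 *	spdk_accel_get_opts(&opts);
 *	opts.task_count = 4096;
 *	spdk_accel_set_opts(&opts);
 */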
2697 int
2698 spdk_accel_set_opts(const struct spdk_accel_opts *opts)
2699 {
2700 	if (opts->size > sizeof(*opts)) {
2701 		return -EINVAL;
2702 	}
2703 
2704 	memcpy(&g_opts, opts, opts->size);
2705 
2706 	return 0;
2707 }
2708 
2709 void
2710 spdk_accel_get_opts(struct spdk_accel_opts *opts)
2711 {
2712 	size_t size = opts->size;
2713 
2714 	assert(size <= sizeof(*opts));
2715 
2716 	memcpy(opts, &g_opts, spdk_min(sizeof(*opts), size));
2717 	opts->size = size;
2718 }
2719 
2720 struct accel_get_stats_ctx {
2721 	struct accel_stats	stats;
2722 	accel_get_stats_cb	cb_fn;
2723 	void			*cb_arg;
2724 };
2725 
2726 static void
2727 accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
2728 {
2729 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
2730 
2731 	ctx->cb_fn(&ctx->stats, ctx->cb_arg);
2732 	free(ctx);
2733 }
2734 
2735 static void
2736 accel_get_channel_stats(struct spdk_io_channel_iter *iter)
2737 {
2738 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
2739 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
2740 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
2741 
2742 	accel_add_stats(&ctx->stats, &accel_ch->stats);
2743 	spdk_for_each_channel_continue(iter, 0);
2744 }
2745 
2746 int
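/*
 * Aggregates statistics from the global counters and from every accel channel.  The
 * per-channel counters are gathered via spdk_for_each_channel(), so cb_fn is invoked
 * asynchronously once all channels have been visited.
 */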
2747 accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg)
2748 {
2749 	struct accel_get_stats_ctx *ctx;
2750 
2751 	ctx = calloc(1, sizeof(*ctx));
2752 	if (ctx == NULL) {
2753 		return -ENOMEM;
2754 	}
2755 
2756 	spdk_spin_lock(&g_stats_lock);
2757 	accel_add_stats(&ctx->stats, &g_stats);
2758 	spdk_spin_unlock(&g_stats_lock);
2759 
2760 	ctx->cb_fn = cb_fn;
2761 	ctx->cb_arg = cb_arg;
2762 
2763 	spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx,
2764 			      accel_get_channel_stats_done);
2765 
2766 	return 0;
2767 }
2768 
2769 void
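/*
 * Retrieves a single opcode's statistics for the given channel.  The size parameter keeps
 * the call ABI-safe: only the fields that fit within size are written.  Sketch:
 *
 *	struct spdk_accel_opcode_stats stats;
 *
 *	spdk_accel_get_opcode_stats(ch, ACCEL_OPC_CRC32C, &stats, sizeof(stats));
 */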
2770 spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum accel_opcode opcode,
2771 			    struct spdk_accel_opcode_stats *stats, size_t size)
2772 {
2773 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
2774 
2775 #define FIELD_OK(field) \
2776 	offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size
2777 
2778 #define SET_FIELD(field, value) \
2779 	if (FIELD_OK(field)) { \
2780 		stats->field = value; \
2781 	}
2782 
2783 	SET_FIELD(executed, accel_ch->stats.operations[opcode].executed);
2784 	SET_FIELD(failed, accel_ch->stats.operations[opcode].failed);
2785 	SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes);
2786 
2787 #undef FIELD_OK
2788 #undef SET_FIELD
2789 }
2790 
2791 SPDK_LOG_REGISTER_COMPONENT(accel)
2792