xref: /spdk/lib/accel/accel.c (revision e5693d682a9872b3bb3a84b3245a099af77992d6)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2020 Intel Corporation.
3  *   Copyright (c) 2022, 2023 NVIDIA CORPORATION & AFFILIATES.
4  *   All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk/accel_module.h"
10 
11 #include "accel_internal.h"
12 
13 #include "spdk/dma.h"
14 #include "spdk/env.h"
15 #include "spdk/likely.h"
16 #include "spdk/log.h"
17 #include "spdk/thread.h"
18 #include "spdk/json.h"
19 #include "spdk/crc32.h"
20 #include "spdk/util.h"
21 #include "spdk/hexlify.h"
22 #include "spdk/string.h"
23 
24 /* Accelerator Framework: The following provides a top level
25  * generic API for the accelerator functions defined here. Modules,
26  * such as the one in /module/accel/ioat, supply the implementation
27  * with the exception of the pure software implementation contained
28  * later in this file.
29  */
30 
/* Required alignment for dualcast destination buffers (checked at submit) */
#define ALIGN_4K			0x1000
/* Default per-channel pool sizes (tasks, sequences, buffer descriptors) */
#define ACCEL_TASKS_PER_CHANNEL		2048
/* Default iobuf cache sizes used by the per-channel iobuf channel */
#define ACCEL_SMALL_CACHE_SIZE		128
#define ACCEL_LARGE_CACHE_SIZE		16
/* Set MSB, so we don't return NULL pointers as buffers */
#define ACCEL_BUFFER_BASE		((void *)(1ull << 63))
#define ACCEL_BUFFER_OFFSET_MASK	((uintptr_t)ACCEL_BUFFER_BASE - 1)

#define ACCEL_CRYPTO_TWEAK_MODE_DEFAULT	SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA
/* Minimum number of free task objects that must remain available before a
 * new sequence may be allocated (see accel_sequence_get for the rationale). */
#define ACCEL_TASKS_IN_SEQUENCE_LIMIT	8
41 
/* Per-opcode module assignment: the module servicing an opcode plus a cached
 * flag describing whether it supports memory domains. */
struct accel_module {
	struct spdk_accel_module_if	*module;
	bool				supports_memory_domains;
};
46 
/* Largest context size for all accel modules */
static size_t g_max_accel_module_size = sizeof(struct spdk_accel_task);

/* NOTE(review): presumably tracks the module currently being finalized during
 * framework shutdown — confirm against the fini path (not in this chunk). */
static struct spdk_accel_module_if *g_accel_module = NULL;
/* Completion callback/argument for framework finalization */
static spdk_accel_fini_cb g_fini_cb_fn = NULL;
static void *g_fini_cb_arg = NULL;
/* Once true, opcode overrides can no longer be changed (see spdk_accel_assign_opc) */
static bool g_modules_started = false;
static struct spdk_memory_domain *g_accel_domain;

/* Global list of registered accelerator modules */
static TAILQ_HEAD(, spdk_accel_module_if) spdk_accel_module_list =
	TAILQ_HEAD_INITIALIZER(spdk_accel_module_list);

/* Crypto keyring, protected by g_keyring_spin */
static TAILQ_HEAD(, spdk_accel_crypto_key) g_keyring = TAILQ_HEAD_INITIALIZER(g_keyring);
static struct spdk_spinlock g_keyring_spin;

/* Global array mapping capabilities to modules */
static struct accel_module g_modules_opc[SPDK_ACCEL_OPC_LAST] = {};
/* User-requested module names per opcode (strdup'd); validated at startup */
static char *g_modules_opc_override[SPDK_ACCEL_OPC_LAST] = {};
TAILQ_HEAD(, spdk_accel_driver) g_accel_drivers = TAILQ_HEAD_INITIALIZER(g_accel_drivers);
static struct spdk_accel_driver *g_accel_driver;
/* Framework-wide tunables; defaults come from the ACCEL_* constants above */
static struct spdk_accel_opts g_opts = {
	.small_cache_size = ACCEL_SMALL_CACHE_SIZE,
	.large_cache_size = ACCEL_LARGE_CACHE_SIZE,
	.task_count = ACCEL_TASKS_PER_CHANNEL,
	.sequence_count = ACCEL_TASKS_PER_CHANNEL,
	.buf_count = ACCEL_TASKS_PER_CHANNEL,
};
/* Aggregate stats, protected by g_stats_lock */
static struct accel_stats g_stats;
static struct spdk_spinlock g_stats_lock;
78 
/* Human-readable opcode names, indexed by enum spdk_accel_opcode (see
 * spdk_accel_get_opcode_name); order must match the enum definition. */
static const char *g_opcode_strings[SPDK_ACCEL_OPC_LAST] = {
	"copy", "fill", "dualcast", "compare", "crc32c", "copy_crc32c",
	"compress", "decompress", "encrypt", "decrypt", "xor",
	"dif_verify", "dif_verify_copy", "dif_generate", "dif_generate_copy"
};
84 
/* States of the per-sequence processing state machine (stored in
 * spdk_accel_sequence::state). The printable names in g_seq_states must be
 * kept in sync with this enum. */
enum accel_sequence_state {
	ACCEL_SEQUENCE_STATE_INIT,
	ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF,
	ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF,
	ACCEL_SEQUENCE_STATE_PULL_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA,
	ACCEL_SEQUENCE_STATE_EXEC_TASK,
	ACCEL_SEQUENCE_STATE_AWAIT_TASK,
	ACCEL_SEQUENCE_STATE_COMPLETE_TASK,
	ACCEL_SEQUENCE_STATE_NEXT_TASK,
	ACCEL_SEQUENCE_STATE_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA,
	ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS,
	ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS,
	/* Terminal error state; transitions out of it are disallowed
	 * (asserted in accel_sequence_set_state) */
	ACCEL_SEQUENCE_STATE_ERROR,
	ACCEL_SEQUENCE_STATE_MAX,
};
105 
/* Printable names for accel_sequence_state values, consumed by
 * ACCEL_SEQUENCE_STATE_STRING for debug logging. Marked unused because the
 * only references (via SPDK_DEBUGLOG) may be compiled out. */
static const char *g_seq_states[]
__attribute__((unused)) = {
	[ACCEL_SEQUENCE_STATE_INIT] = "init",
	[ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF] = "check-virtbuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF] = "await-virtbuf",
	[ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF] = "check-bouncebuf",
	[ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF] = "await-bouncebuf",
	[ACCEL_SEQUENCE_STATE_PULL_DATA] = "pull-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA] = "await-pull-data",
	[ACCEL_SEQUENCE_STATE_EXEC_TASK] = "exec-task",
	[ACCEL_SEQUENCE_STATE_AWAIT_TASK] = "await-task",
	[ACCEL_SEQUENCE_STATE_COMPLETE_TASK] = "complete-task",
	[ACCEL_SEQUENCE_STATE_NEXT_TASK] = "next-task",
	[ACCEL_SEQUENCE_STATE_PUSH_DATA] = "push-data",
	[ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA] = "await-push-data",
	[ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS] = "driver-exec-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS] = "driver-await-tasks",
	[ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS] = "driver-complete-tasks",
	[ACCEL_SEQUENCE_STATE_ERROR] = "error",
	[ACCEL_SEQUENCE_STATE_MAX] = "",
};
127 
/* Map a sequence state to its printable name, range-checking so that a
 * corrupted state value can never index past g_seq_states. */
#define ACCEL_SEQUENCE_STATE_STRING(s) \
	(((s) >= ACCEL_SEQUENCE_STATE_INIT && (s) < ACCEL_SEQUENCE_STATE_MAX) \
	 ? g_seq_states[s] : "unknown")
131 
/* Descriptor for a framework-allocated data buffer used within a sequence.
 * The backing memory (buf) stays NULL until claimed from the iobuf pool. */
struct accel_buffer {
	struct spdk_accel_sequence	*seq;	/* owning sequence, NULL when idle */
	void				*buf;	/* backing memory; NULL until allocated */
	uint64_t			len;	/* requested size in bytes */
	/* NOTE(review): presumably used to queue for iobuf memory when the pool
	 * is empty — confirm against the allocation path (not in this chunk). */
	struct spdk_iobuf_entry		iobuf;
	spdk_accel_sequence_get_buf_cb	cb_fn;	/* invoked once memory is available */
	void				*cb_ctx;
	SLIST_ENTRY(accel_buffer)	link;	/* linkage in the per-channel buf_pool */
	struct accel_io_channel		*ch;	/* channel the descriptor belongs to */
};
142 
/* Per-thread accel channel: holds one module channel per opcode, free pools
 * for tasks/sequences/buffers/aux data, and per-channel statistics. */
struct accel_io_channel {
	struct spdk_io_channel			*module_ch[SPDK_ACCEL_OPC_LAST];
	struct spdk_io_channel			*driver_channel;
	/* Base pointers of the pool allocations (freed on channel destroy) */
	void					*task_pool_base;
	struct spdk_accel_sequence		*seq_pool_base;
	struct accel_buffer			*buf_pool_base;
	struct spdk_accel_task_aux_data		*task_aux_data_base;
	/* Free lists carved out of the allocations above */
	STAILQ_HEAD(, spdk_accel_task)		task_pool;
	SLIST_HEAD(, spdk_accel_task_aux_data)	task_aux_data_pool;
	SLIST_HEAD(, spdk_accel_sequence)	seq_pool;
	SLIST_HEAD(, accel_buffer)		buf_pool;
	struct spdk_iobuf_channel		iobuf;
	struct accel_stats			stats;
};
157 
TAILQ_HEAD(accel_sequence_tasks, spdk_accel_task);

/* A chain of accel tasks executed back-to-back. Deliberately packed to one
 * cache line — the static assert below pins the size, so do not add fields
 * without rebalancing. */
struct spdk_accel_sequence {
	struct accel_io_channel			*ch;
	struct accel_sequence_tasks		tasks;
	SLIST_HEAD(, accel_buffer)		bounce_bufs;
	int					status;
	/* state uses enum accel_sequence_state */
	uint8_t					state;
	/* guards against re-entering the processing loop */
	bool					in_process_sequence;
	spdk_accel_completion_cb		cb_fn;
	void					*cb_arg;
	SLIST_ENTRY(spdk_accel_sequence)	link;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_sequence) == 64, "invalid size");
173 
/* Add v (may be negative) to a per-channel stats field */
#define accel_update_stats(ch, event, v) \
	do { \
		(ch)->stats.event += (v); \
	} while (0)

/* Add v to the per-opcode stats field matching the task's op_code */
#define accel_update_task_stats(ch, task, event, v) \
	accel_update_stats(ch, operations[(task)->op_code].event, v)
181 
182 static inline void accel_sequence_task_cb(void *cb_arg, int status);
183 
/* Transition a sequence to a new state, debug-logging the change. Once a
 * sequence has entered ERROR it must stay there — the assert enforces that
 * no code path accidentally resurrects a failed sequence. */
static inline void
accel_sequence_set_state(struct spdk_accel_sequence *seq, enum accel_sequence_state state)
{
	SPDK_DEBUGLOG(accel, "seq=%p, setting state: %s -> %s\n", seq,
		      ACCEL_SEQUENCE_STATE_STRING(seq->state), ACCEL_SEQUENCE_STATE_STRING(state));
	assert(seq->state != ACCEL_SEQUENCE_STATE_ERROR || state == ACCEL_SEQUENCE_STATE_ERROR);
	seq->state = state;
}
192 
193 static void
194 accel_sequence_set_fail(struct spdk_accel_sequence *seq, int status)
195 {
196 	accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
197 	assert(status != 0);
198 	seq->status = status;
199 }
200 
201 int
202 spdk_accel_get_opc_module_name(enum spdk_accel_opcode opcode, const char **module_name)
203 {
204 	if (opcode >= SPDK_ACCEL_OPC_LAST) {
205 		/* invalid opcode */
206 		return -EINVAL;
207 	}
208 
209 	if (g_modules_opc[opcode].module) {
210 		*module_name = g_modules_opc[opcode].module->name;
211 	} else {
212 		return -ENOENT;
213 	}
214 
215 	return 0;
216 }
217 
218 void
219 _accel_for_each_module(struct module_info *info, _accel_for_each_module_fn fn)
220 {
221 	struct spdk_accel_module_if *accel_module;
222 	enum spdk_accel_opcode opcode;
223 	int j = 0;
224 
225 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
226 		for (opcode = 0; opcode < SPDK_ACCEL_OPC_LAST; opcode++) {
227 			if (accel_module->supports_opcode(opcode)) {
228 				info->ops[j] = opcode;
229 				j++;
230 			}
231 		}
232 		info->name = accel_module->name;
233 		info->num_ops = j;
234 		fn(info);
235 		j = 0;
236 	}
237 }
238 
239 const char *
240 spdk_accel_get_opcode_name(enum spdk_accel_opcode opcode)
241 {
242 	if (opcode < SPDK_ACCEL_OPC_LAST) {
243 		return g_opcode_strings[opcode];
244 	}
245 
246 	return NULL;
247 }
248 
249 int
250 spdk_accel_assign_opc(enum spdk_accel_opcode opcode, const char *name)
251 {
252 	char *copy;
253 
254 	if (g_modules_started == true) {
255 		/* we don't allow re-assignment once things have started */
256 		return -EINVAL;
257 	}
258 
259 	if (opcode >= SPDK_ACCEL_OPC_LAST) {
260 		/* invalid opcode */
261 		return -EINVAL;
262 	}
263 
264 	copy = strdup(name);
265 	if (copy == NULL) {
266 		return -ENOMEM;
267 	}
268 
269 	/* module selection will be validated after the framework starts. */
270 	free(g_modules_opc_override[opcode]);
271 	g_modules_opc_override[opcode] = copy;
272 
273 	return 0;
274 }
275 
/* Pop a task object from the channel's free pool and initialize the fields
 * common to all operations. Returns NULL (and bumps the retry.task stat)
 * when the pool is exhausted — callers translate this into -ENOMEM. */
inline static struct spdk_accel_task *
_get_task(struct accel_io_channel *accel_ch, spdk_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_accel_task *accel_task;

	accel_task = STAILQ_FIRST(&accel_ch->task_pool);
	if (spdk_unlikely(accel_task == NULL)) {
		accel_update_stats(accel_ch, retry.task, 1);
		return NULL;
	}

	accel_update_stats(accel_ch, task_outstanding, 1);
	STAILQ_REMOVE_HEAD(&accel_ch->task_pool, link);
	/* Clear the stale next pointer left over from the free-list linkage */
	accel_task->link.stqe_next = NULL;

	accel_task->cb_fn = cb_fn;
	accel_task->cb_arg = cb_arg;
	accel_task->accel_ch = accel_ch;
	accel_task->s.iovs = NULL;
	accel_task->d.iovs = NULL;

	return accel_task;
}
299 
300 static void
301 _put_task(struct accel_io_channel *ch, struct spdk_accel_task *task)
302 {
303 	STAILQ_INSERT_HEAD(&ch->task_pool, task, link);
304 	accel_update_stats(ch, task_outstanding, -1);
305 }
306 
/* Complete an accel task: update per-opcode stats, recycle aux data and the
 * task itself, then invoke the user's completion callback. Tasks belonging
 * to a sequence are instead routed into the sequence state machine. */
void
spdk_accel_task_complete(struct spdk_accel_task *accel_task, int status)
{
	struct accel_io_channel		*accel_ch = accel_task->accel_ch;
	spdk_accel_completion_cb	cb_fn;
	void				*cb_arg;

	accel_update_task_stats(accel_ch, accel_task, executed, 1);
	accel_update_task_stats(accel_ch, accel_task, num_bytes, accel_task->nbytes);
	if (spdk_unlikely(status != 0)) {
		accel_update_task_stats(accel_ch, accel_task, failed, 1);
	}

	/* Sequence tasks complete through the sequence state machine, not the
	 * per-task callback path below. */
	if (accel_task->seq) {
		accel_sequence_task_cb(accel_task->seq, status);
		return;
	}

	/* Snapshot the callback before the task is recycled below */
	cb_fn = accel_task->cb_fn;
	cb_arg = accel_task->cb_arg;

	if (accel_task->has_aux) {
		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task->aux, link);
		accel_task->aux = NULL;
		accel_task->has_aux = false;
	}

	/* We should put the accel_task into the list firstly in order to avoid
	 * the accel task list is exhausted when there is recursive call to
	 * allocate accel_task in user's call back function (cb_fn)
	 */
	_put_task(accel_ch, accel_task);

	cb_fn(cb_arg, status);
}
342 
/* Hand a fully-initialized task to the module assigned to its opcode.
 * A non-zero return means the module did not accept the task; the failure
 * is counted in the per-opcode stats and the caller propagates the error. */
static inline int
accel_submit_task(struct accel_io_channel *accel_ch, struct spdk_accel_task *task)
{
	struct spdk_io_channel *module_ch = accel_ch->module_ch[task->op_code];
	struct spdk_accel_module_if *module = g_modules_opc[task->op_code].module;
	int rc;

	rc = module->submit_tasks(module_ch, task);
	if (spdk_unlikely(rc != 0)) {
		accel_update_task_stats(accel_ch, task, failed, 1);
	}

	return rc;
}
357 
/* Sum the lengths of @iovcnt iovec elements, i.e. the total payload size. */
static inline uint64_t
accel_get_iovlen(struct iovec *iovs, uint32_t iovcnt)
{
	struct iovec *iov = iovs;
	struct iovec *end = iovs + iovcnt;
	uint64_t total = 0;

	while (iov < end) {
		total += iov->iov_len;
		iov++;
	}

	return total;
}
370 
/* Claim an aux-data object (scratch iovecs) from the channel's pool for
 * (task). On exhaustion the task is returned to its pool and the ENCLOSING
 * FUNCTION returns -ENOMEM — note the hidden 'return' below.
 * NOTE(review): exhaustion is treated as fatal (assert) — presumably the aux
 * pool is sized to match the task pool; confirm at channel creation (not in
 * this chunk). */
#define ACCEL_TASK_ALLOC_AUX_BUF(task)						\
do {										\
        (task)->aux = SLIST_FIRST(&(task)->accel_ch->task_aux_data_pool);	\
        if (spdk_unlikely(!(task)->aux)) {					\
                SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");	\
                _put_task(task->accel_ch, task);				\
                assert(0);							\
                return -ENOMEM;							\
        }									\
        SLIST_REMOVE_HEAD(&(task)->accel_ch->task_aux_data_pool, link);		\
        (task)->has_aux = true;							\
} while (0)
383 
384 /* Accel framework public API for copy function */
385 int
386 spdk_accel_submit_copy(struct spdk_io_channel *ch, void *dst, void *src,
387 		       uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
388 {
389 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
390 	struct spdk_accel_task *accel_task;
391 
392 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
393 	if (spdk_unlikely(accel_task == NULL)) {
394 		return -ENOMEM;
395 	}
396 
397 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
398 
399 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
400 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
401 	accel_task->d.iovs[0].iov_base = dst;
402 	accel_task->d.iovs[0].iov_len = nbytes;
403 	accel_task->d.iovcnt = 1;
404 	accel_task->s.iovs[0].iov_base = src;
405 	accel_task->s.iovs[0].iov_len = nbytes;
406 	accel_task->s.iovcnt = 1;
407 	accel_task->nbytes = nbytes;
408 	accel_task->op_code = SPDK_ACCEL_OPC_COPY;
409 	accel_task->src_domain = NULL;
410 	accel_task->dst_domain = NULL;
411 
412 	return accel_submit_task(accel_ch, accel_task);
413 }
414 
415 /* Accel framework public API for dual cast copy function */
416 int
417 spdk_accel_submit_dualcast(struct spdk_io_channel *ch, void *dst1,
418 			   void *dst2, void *src, uint64_t nbytes,
419 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
420 {
421 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
422 	struct spdk_accel_task *accel_task;
423 
424 	if ((uintptr_t)dst1 & (ALIGN_4K - 1) || (uintptr_t)dst2 & (ALIGN_4K - 1)) {
425 		SPDK_ERRLOG("Dualcast requires 4K alignment on dst addresses\n");
426 		return -EINVAL;
427 	}
428 
429 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
430 	if (spdk_unlikely(accel_task == NULL)) {
431 		return -ENOMEM;
432 	}
433 
434 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
435 
436 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
437 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
438 	accel_task->d2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST2];
439 	accel_task->d.iovs[0].iov_base = dst1;
440 	accel_task->d.iovs[0].iov_len = nbytes;
441 	accel_task->d.iovcnt = 1;
442 	accel_task->d2.iovs[0].iov_base = dst2;
443 	accel_task->d2.iovs[0].iov_len = nbytes;
444 	accel_task->d2.iovcnt = 1;
445 	accel_task->s.iovs[0].iov_base = src;
446 	accel_task->s.iovs[0].iov_len = nbytes;
447 	accel_task->s.iovcnt = 1;
448 	accel_task->nbytes = nbytes;
449 	accel_task->op_code = SPDK_ACCEL_OPC_DUALCAST;
450 	accel_task->src_domain = NULL;
451 	accel_task->dst_domain = NULL;
452 
453 	return accel_submit_task(accel_ch, accel_task);
454 }
455 
456 /* Accel framework public API for compare function */
457 
458 int
459 spdk_accel_submit_compare(struct spdk_io_channel *ch, void *src1,
460 			  void *src2, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
461 			  void *cb_arg)
462 {
463 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
464 	struct spdk_accel_task *accel_task;
465 
466 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
467 	if (spdk_unlikely(accel_task == NULL)) {
468 		return -ENOMEM;
469 	}
470 
471 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
472 
473 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
474 	accel_task->s2.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC2];
475 	accel_task->s.iovs[0].iov_base = src1;
476 	accel_task->s.iovs[0].iov_len = nbytes;
477 	accel_task->s.iovcnt = 1;
478 	accel_task->s2.iovs[0].iov_base = src2;
479 	accel_task->s2.iovs[0].iov_len = nbytes;
480 	accel_task->s2.iovcnt = 1;
481 	accel_task->nbytes = nbytes;
482 	accel_task->op_code = SPDK_ACCEL_OPC_COMPARE;
483 	accel_task->src_domain = NULL;
484 	accel_task->dst_domain = NULL;
485 
486 	return accel_submit_task(accel_ch, accel_task);
487 }
488 
489 /* Accel framework public API for fill function */
490 int
491 spdk_accel_submit_fill(struct spdk_io_channel *ch, void *dst,
492 		       uint8_t fill, uint64_t nbytes,
493 		       spdk_accel_completion_cb cb_fn, void *cb_arg)
494 {
495 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
496 	struct spdk_accel_task *accel_task;
497 
498 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
499 	if (spdk_unlikely(accel_task == NULL)) {
500 		return -ENOMEM;
501 	}
502 
503 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
504 
505 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
506 	accel_task->d.iovs[0].iov_base = dst;
507 	accel_task->d.iovs[0].iov_len = nbytes;
508 	accel_task->d.iovcnt = 1;
509 	accel_task->nbytes = nbytes;
510 	memset(&accel_task->fill_pattern, fill, sizeof(uint64_t));
511 	accel_task->op_code = SPDK_ACCEL_OPC_FILL;
512 	accel_task->src_domain = NULL;
513 	accel_task->dst_domain = NULL;
514 
515 	return accel_submit_task(accel_ch, accel_task);
516 }
517 
518 /* Accel framework public API for CRC-32C function */
519 int
520 spdk_accel_submit_crc32c(struct spdk_io_channel *ch, uint32_t *crc_dst,
521 			 void *src, uint32_t seed, uint64_t nbytes, spdk_accel_completion_cb cb_fn,
522 			 void *cb_arg)
523 {
524 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
525 	struct spdk_accel_task *accel_task;
526 
527 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
528 	if (spdk_unlikely(accel_task == NULL)) {
529 		return -ENOMEM;
530 	}
531 
532 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
533 
534 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
535 	accel_task->s.iovs[0].iov_base = src;
536 	accel_task->s.iovs[0].iov_len = nbytes;
537 	accel_task->s.iovcnt = 1;
538 	accel_task->nbytes = nbytes;
539 	accel_task->crc_dst = crc_dst;
540 	accel_task->seed = seed;
541 	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
542 	accel_task->src_domain = NULL;
543 	accel_task->dst_domain = NULL;
544 
545 	return accel_submit_task(accel_ch, accel_task);
546 }
547 
548 /* Accel framework public API for chained CRC-32C function */
549 int
550 spdk_accel_submit_crc32cv(struct spdk_io_channel *ch, uint32_t *crc_dst,
551 			  struct iovec *iov, uint32_t iov_cnt, uint32_t seed,
552 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
553 {
554 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
555 	struct spdk_accel_task *accel_task;
556 
557 	if (iov == NULL) {
558 		SPDK_ERRLOG("iov should not be NULL");
559 		return -EINVAL;
560 	}
561 
562 	if (!iov_cnt) {
563 		SPDK_ERRLOG("iovcnt should not be zero value\n");
564 		return -EINVAL;
565 	}
566 
567 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
568 	if (spdk_unlikely(accel_task == NULL)) {
569 		SPDK_ERRLOG("no memory\n");
570 		assert(0);
571 		return -ENOMEM;
572 	}
573 
574 	accel_task->s.iovs = iov;
575 	accel_task->s.iovcnt = iov_cnt;
576 	accel_task->nbytes = accel_get_iovlen(iov, iov_cnt);
577 	accel_task->crc_dst = crc_dst;
578 	accel_task->seed = seed;
579 	accel_task->op_code = SPDK_ACCEL_OPC_CRC32C;
580 	accel_task->src_domain = NULL;
581 	accel_task->dst_domain = NULL;
582 
583 	return accel_submit_task(accel_ch, accel_task);
584 }
585 
586 /* Accel framework public API for copy with CRC-32C function */
587 int
588 spdk_accel_submit_copy_crc32c(struct spdk_io_channel *ch, void *dst,
589 			      void *src, uint32_t *crc_dst, uint32_t seed, uint64_t nbytes,
590 			      spdk_accel_completion_cb cb_fn, void *cb_arg)
591 {
592 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
593 	struct spdk_accel_task *accel_task;
594 
595 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
596 	if (spdk_unlikely(accel_task == NULL)) {
597 		return -ENOMEM;
598 	}
599 
600 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
601 
602 	accel_task->s.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_SRC];
603 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
604 	accel_task->d.iovs[0].iov_base = dst;
605 	accel_task->d.iovs[0].iov_len = nbytes;
606 	accel_task->d.iovcnt = 1;
607 	accel_task->s.iovs[0].iov_base = src;
608 	accel_task->s.iovs[0].iov_len = nbytes;
609 	accel_task->s.iovcnt = 1;
610 	accel_task->nbytes = nbytes;
611 	accel_task->crc_dst = crc_dst;
612 	accel_task->seed = seed;
613 	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
614 	accel_task->src_domain = NULL;
615 	accel_task->dst_domain = NULL;
616 
617 	return accel_submit_task(accel_ch, accel_task);
618 }
619 
620 /* Accel framework public API for chained copy + CRC-32C function */
621 int
622 spdk_accel_submit_copy_crc32cv(struct spdk_io_channel *ch, void *dst,
623 			       struct iovec *src_iovs, uint32_t iov_cnt, uint32_t *crc_dst,
624 			       uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg)
625 {
626 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
627 	struct spdk_accel_task *accel_task;
628 	uint64_t nbytes;
629 
630 	if (src_iovs == NULL) {
631 		SPDK_ERRLOG("iov should not be NULL");
632 		return -EINVAL;
633 	}
634 
635 	if (!iov_cnt) {
636 		SPDK_ERRLOG("iovcnt should not be zero value\n");
637 		return -EINVAL;
638 	}
639 
640 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
641 	if (spdk_unlikely(accel_task == NULL)) {
642 		SPDK_ERRLOG("no memory\n");
643 		assert(0);
644 		return -ENOMEM;
645 	}
646 
647 	nbytes = accel_get_iovlen(src_iovs, iov_cnt);
648 
649 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
650 
651 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
652 	accel_task->d.iovs[0].iov_base = dst;
653 	accel_task->d.iovs[0].iov_len = nbytes;
654 	accel_task->d.iovcnt = 1;
655 	accel_task->s.iovs = src_iovs;
656 	accel_task->s.iovcnt = iov_cnt;
657 	accel_task->nbytes = nbytes;
658 	accel_task->crc_dst = crc_dst;
659 	accel_task->seed = seed;
660 	accel_task->op_code = SPDK_ACCEL_OPC_COPY_CRC32C;
661 	accel_task->src_domain = NULL;
662 	accel_task->dst_domain = NULL;
663 
664 	return accel_submit_task(accel_ch, accel_task);
665 }
666 
667 int
668 spdk_accel_submit_compress(struct spdk_io_channel *ch, void *dst, uint64_t nbytes,
669 			   struct iovec *src_iovs, size_t src_iovcnt, uint32_t *output_size,
670 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
671 {
672 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
673 	struct spdk_accel_task *accel_task;
674 
675 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
676 	if (spdk_unlikely(accel_task == NULL)) {
677 		return -ENOMEM;
678 	}
679 
680 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
681 
682 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
683 	accel_task->d.iovs[0].iov_base = dst;
684 	accel_task->d.iovs[0].iov_len = nbytes;
685 	accel_task->d.iovcnt = 1;
686 	accel_task->output_size = output_size;
687 	accel_task->s.iovs = src_iovs;
688 	accel_task->s.iovcnt = src_iovcnt;
689 	accel_task->nbytes = nbytes;
690 	accel_task->op_code = SPDK_ACCEL_OPC_COMPRESS;
691 	accel_task->src_domain = NULL;
692 	accel_task->dst_domain = NULL;
693 
694 	return accel_submit_task(accel_ch, accel_task);
695 }
696 
697 int
698 spdk_accel_submit_decompress(struct spdk_io_channel *ch, struct iovec *dst_iovs,
699 			     size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
700 			     uint32_t *output_size, spdk_accel_completion_cb cb_fn,
701 			     void *cb_arg)
702 {
703 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
704 	struct spdk_accel_task *accel_task;
705 
706 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
707 	if (spdk_unlikely(accel_task == NULL)) {
708 		return -ENOMEM;
709 	}
710 
711 	accel_task->output_size = output_size;
712 	accel_task->s.iovs = src_iovs;
713 	accel_task->s.iovcnt = src_iovcnt;
714 	accel_task->d.iovs = dst_iovs;
715 	accel_task->d.iovcnt = dst_iovcnt;
716 	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
717 	accel_task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
718 	accel_task->src_domain = NULL;
719 	accel_task->dst_domain = NULL;
720 
721 	return accel_submit_task(accel_ch, accel_task);
722 }
723 
724 int
725 spdk_accel_submit_encrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
726 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
727 			  struct iovec *src_iovs, uint32_t src_iovcnt,
728 			  uint64_t iv, uint32_t block_size,
729 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
730 {
731 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
732 	struct spdk_accel_task *accel_task;
733 
734 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
735 		return -EINVAL;
736 	}
737 
738 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
739 	if (spdk_unlikely(accel_task == NULL)) {
740 		return -ENOMEM;
741 	}
742 
743 	accel_task->crypto_key = key;
744 	accel_task->s.iovs = src_iovs;
745 	accel_task->s.iovcnt = src_iovcnt;
746 	accel_task->d.iovs = dst_iovs;
747 	accel_task->d.iovcnt = dst_iovcnt;
748 	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
749 	accel_task->iv = iv;
750 	accel_task->block_size = block_size;
751 	accel_task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
752 	accel_task->src_domain = NULL;
753 	accel_task->dst_domain = NULL;
754 
755 	return accel_submit_task(accel_ch, accel_task);
756 }
757 
758 int
759 spdk_accel_submit_decrypt(struct spdk_io_channel *ch, struct spdk_accel_crypto_key *key,
760 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
761 			  struct iovec *src_iovs, uint32_t src_iovcnt,
762 			  uint64_t iv, uint32_t block_size,
763 			  spdk_accel_completion_cb cb_fn, void *cb_arg)
764 {
765 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
766 	struct spdk_accel_task *accel_task;
767 
768 	if (spdk_unlikely(!dst_iovs || !dst_iovcnt || !src_iovs || !src_iovcnt || !key || !block_size)) {
769 		return -EINVAL;
770 	}
771 
772 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
773 	if (spdk_unlikely(accel_task == NULL)) {
774 		return -ENOMEM;
775 	}
776 
777 	accel_task->crypto_key = key;
778 	accel_task->s.iovs = src_iovs;
779 	accel_task->s.iovcnt = src_iovcnt;
780 	accel_task->d.iovs = dst_iovs;
781 	accel_task->d.iovcnt = dst_iovcnt;
782 	accel_task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
783 	accel_task->iv = iv;
784 	accel_task->block_size = block_size;
785 	accel_task->op_code = SPDK_ACCEL_OPC_DECRYPT;
786 	accel_task->src_domain = NULL;
787 	accel_task->dst_domain = NULL;
788 
789 	return accel_submit_task(accel_ch, accel_task);
790 }
791 
792 int
793 spdk_accel_submit_xor(struct spdk_io_channel *ch, void *dst, void **sources, uint32_t nsrcs,
794 		      uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg)
795 {
796 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
797 	struct spdk_accel_task *accel_task;
798 
799 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
800 	if (spdk_unlikely(accel_task == NULL)) {
801 		return -ENOMEM;
802 	}
803 
804 	ACCEL_TASK_ALLOC_AUX_BUF(accel_task);
805 
806 	accel_task->d.iovs = &accel_task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
807 	accel_task->nsrcs.srcs = sources;
808 	accel_task->nsrcs.cnt = nsrcs;
809 	accel_task->d.iovs[0].iov_base = dst;
810 	accel_task->d.iovs[0].iov_len = nbytes;
811 	accel_task->d.iovcnt = 1;
812 	accel_task->nbytes = nbytes;
813 	accel_task->op_code = SPDK_ACCEL_OPC_XOR;
814 	accel_task->src_domain = NULL;
815 	accel_task->dst_domain = NULL;
816 
817 	return accel_submit_task(accel_ch, accel_task);
818 }
819 
820 int
821 spdk_accel_submit_dif_verify(struct spdk_io_channel *ch,
822 			     struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
823 			     const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
824 			     spdk_accel_completion_cb cb_fn, void *cb_arg)
825 {
826 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
827 	struct spdk_accel_task *accel_task;
828 
829 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
830 	if (accel_task == NULL) {
831 		return -ENOMEM;
832 	}
833 
834 	accel_task->s.iovs = iovs;
835 	accel_task->s.iovcnt = iovcnt;
836 	accel_task->dif.ctx = ctx;
837 	accel_task->dif.err = err;
838 	accel_task->dif.num_blocks = num_blocks;
839 	accel_task->nbytes = num_blocks * ctx->block_size;
840 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY;
841 	accel_task->src_domain = NULL;
842 	accel_task->dst_domain = NULL;
843 
844 	return accel_submit_task(accel_ch, accel_task);
845 }
846 
847 int
848 spdk_accel_submit_dif_generate(struct spdk_io_channel *ch,
849 			       struct iovec *iovs, size_t iovcnt, uint32_t num_blocks,
850 			       const struct spdk_dif_ctx *ctx,
851 			       spdk_accel_completion_cb cb_fn, void *cb_arg)
852 {
853 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
854 	struct spdk_accel_task *accel_task;
855 
856 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
857 	if (accel_task == NULL) {
858 		return -ENOMEM;
859 	}
860 
861 	accel_task->s.iovs = iovs;
862 	accel_task->s.iovcnt = iovcnt;
863 	accel_task->dif.ctx = ctx;
864 	accel_task->dif.num_blocks = num_blocks;
865 	accel_task->nbytes = num_blocks * ctx->block_size;
866 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE;
867 	accel_task->src_domain = NULL;
868 	accel_task->dst_domain = NULL;
869 
870 	return accel_submit_task(accel_ch, accel_task);
871 }
872 
873 int
874 spdk_accel_submit_dif_generate_copy(struct spdk_io_channel *ch, struct iovec *dst_iovs,
875 				    size_t dst_iovcnt, struct iovec *src_iovs, size_t src_iovcnt,
876 				    uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
877 				    spdk_accel_completion_cb cb_fn, void *cb_arg)
878 {
879 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
880 	struct spdk_accel_task *accel_task;
881 
882 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
883 	if (accel_task == NULL) {
884 		return -ENOMEM;
885 	}
886 
887 	accel_task->s.iovs = src_iovs;
888 	accel_task->s.iovcnt = src_iovcnt;
889 	accel_task->d.iovs = dst_iovs;
890 	accel_task->d.iovcnt = dst_iovcnt;
891 	accel_task->dif.ctx = ctx;
892 	accel_task->dif.num_blocks = num_blocks;
893 	accel_task->nbytes = num_blocks * ctx->block_size;
894 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE_COPY;
895 	accel_task->src_domain = NULL;
896 	accel_task->dst_domain = NULL;
897 
898 	return accel_submit_task(accel_ch, accel_task);
899 }
900 
901 int
902 spdk_accel_submit_dif_verify_copy(struct spdk_io_channel *ch,
903 				  struct iovec *dst_iovs, size_t dst_iovcnt,
904 				  struct iovec *src_iovs, size_t src_iovcnt, uint32_t num_blocks,
905 				  const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
906 				  spdk_accel_completion_cb cb_fn, void *cb_arg)
907 {
908 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
909 	struct spdk_accel_task *accel_task;
910 
911 	accel_task = _get_task(accel_ch, cb_fn, cb_arg);
912 	if (accel_task == NULL) {
913 		return -ENOMEM;
914 	}
915 
916 	accel_task->s.iovs = src_iovs;
917 	accel_task->s.iovcnt = src_iovcnt;
918 	accel_task->d.iovs = dst_iovs;
919 	accel_task->d.iovcnt = dst_iovcnt;
920 	accel_task->dif.ctx = ctx;
921 	accel_task->dif.err = err;
922 	accel_task->dif.num_blocks = num_blocks;
923 	accel_task->nbytes = num_blocks * ctx->block_size;
924 	accel_task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY_COPY;
925 	accel_task->src_domain = NULL;
926 	accel_task->dst_domain = NULL;
927 
928 	return accel_submit_task(accel_ch, accel_task);
929 }
930 
931 static inline struct accel_buffer *
932 accel_get_buf(struct accel_io_channel *ch, uint64_t len)
933 {
934 	struct accel_buffer *buf;
935 
936 	buf = SLIST_FIRST(&ch->buf_pool);
937 	if (spdk_unlikely(buf == NULL)) {
938 		accel_update_stats(ch, retry.bufdesc, 1);
939 		return NULL;
940 	}
941 
942 	SLIST_REMOVE_HEAD(&ch->buf_pool, link);
943 	buf->len = len;
944 	buf->buf = NULL;
945 	buf->seq = NULL;
946 	buf->cb_fn = NULL;
947 
948 	return buf;
949 }
950 
/* Return an accel buffer descriptor (and its iobuf data buffer, if one was
 * ever attached) to the channel's pools. */
static inline void
accel_put_buf(struct accel_io_channel *ch, struct accel_buffer *buf)
{
	if (buf->buf != NULL) {
		spdk_iobuf_put(&ch->iobuf, buf->buf, buf->len);
	}

	SLIST_INSERT_HEAD(&ch->buf_pool, buf, link);
}
960 
/* Allocate a sequence object from the channel's pool and initialize its
 * per-use state. Returns NULL when the pool is empty or when too few task
 * objects remain on the channel (see deadlock note below). */
static inline struct spdk_accel_sequence *
accel_sequence_get(struct accel_io_channel *ch)
{
	struct spdk_accel_sequence *seq;

	assert(g_opts.task_count >= ch->stats.task_outstanding);

	/* Sequence cannot be allocated if number of available task objects cannot satisfy required limit.
	 * This is to prevent potential dead lock when few requests are pending task resource and none can
	 * advance the processing. This solution should work only if there is single async operation after
	 * sequence obj obtained, so assume that is possible to happen with io buffer allocation now, if
	 * there are more async operations then solution should be improved. */
	if (spdk_unlikely(g_opts.task_count - ch->stats.task_outstanding < ACCEL_TASKS_IN_SEQUENCE_LIMIT)) {
		return NULL;
	}

	seq = SLIST_FIRST(&ch->seq_pool);
	if (spdk_unlikely(seq == NULL)) {
		/* Pool exhausted; count the retry so it shows up in stats. */
		accel_update_stats(ch, retry.sequence, 1);
		return NULL;
	}

	accel_update_stats(ch, sequence_outstanding, 1);
	SLIST_REMOVE_HEAD(&ch->seq_pool, link);

	/* Reset per-use state; pooled objects carry stale links from prior use. */
	TAILQ_INIT(&seq->tasks);
	SLIST_INIT(&seq->bounce_bufs);

	seq->ch = ch;
	seq->status = 0;
	seq->state = ACCEL_SEQUENCE_STATE_INIT;
	seq->in_process_sequence = false;

	return seq;
}
996 
997 static inline void
998 accel_sequence_put(struct spdk_accel_sequence *seq)
999 {
1000 	struct accel_io_channel *ch = seq->ch;
1001 	struct accel_buffer *buf;
1002 
1003 	while (!SLIST_EMPTY(&seq->bounce_bufs)) {
1004 		buf = SLIST_FIRST(&seq->bounce_bufs);
1005 		SLIST_REMOVE_HEAD(&seq->bounce_bufs, link);
1006 		accel_put_buf(seq->ch, buf);
1007 	}
1008 
1009 	assert(TAILQ_EMPTY(&seq->tasks));
1010 	seq->ch = NULL;
1011 
1012 	SLIST_INSERT_HEAD(&ch->seq_pool, seq, link);
1013 	accel_update_stats(ch, sequence_outstanding, -1);
1014 }
1015 
1016 static void accel_sequence_task_cb(void *cb_arg, int status);
1017 
1018 static inline struct spdk_accel_task *
1019 accel_sequence_get_task(struct accel_io_channel *ch, struct spdk_accel_sequence *seq,
1020 			spdk_accel_step_cb cb_fn, void *cb_arg)
1021 {
1022 	struct spdk_accel_task *task;
1023 
1024 	task = _get_task(ch, NULL, NULL);
1025 	if (spdk_unlikely(task == NULL)) {
1026 		return task;
1027 	}
1028 
1029 	task->step_cb_fn = cb_fn;
1030 	task->cb_arg = cb_arg;
1031 	task->seq = seq;
1032 
1033 	return task;
1034 }
1035 
1036 int
1037 spdk_accel_append_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1038 		       struct iovec *dst_iovs, uint32_t dst_iovcnt,
1039 		       struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1040 		       struct iovec *src_iovs, uint32_t src_iovcnt,
1041 		       struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1042 		       spdk_accel_step_cb cb_fn, void *cb_arg)
1043 {
1044 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1045 	struct spdk_accel_task *task;
1046 	struct spdk_accel_sequence *seq = *pseq;
1047 
1048 	if (seq == NULL) {
1049 		seq = accel_sequence_get(accel_ch);
1050 		if (spdk_unlikely(seq == NULL)) {
1051 			return -ENOMEM;
1052 		}
1053 	}
1054 
1055 	assert(seq->ch == accel_ch);
1056 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1057 	if (spdk_unlikely(task == NULL)) {
1058 		if (*pseq == NULL) {
1059 			accel_sequence_put(seq);
1060 		}
1061 
1062 		return -ENOMEM;
1063 	}
1064 
1065 	task->dst_domain = dst_domain;
1066 	task->dst_domain_ctx = dst_domain_ctx;
1067 	task->d.iovs = dst_iovs;
1068 	task->d.iovcnt = dst_iovcnt;
1069 	task->src_domain = src_domain;
1070 	task->src_domain_ctx = src_domain_ctx;
1071 	task->s.iovs = src_iovs;
1072 	task->s.iovcnt = src_iovcnt;
1073 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1074 	task->op_code = SPDK_ACCEL_OPC_COPY;
1075 
1076 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1077 	*pseq = seq;
1078 
1079 	return 0;
1080 }
1081 
1082 int
1083 spdk_accel_append_fill(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1084 		       void *buf, uint64_t len,
1085 		       struct spdk_memory_domain *domain, void *domain_ctx, uint8_t pattern,
1086 		       spdk_accel_step_cb cb_fn, void *cb_arg)
1087 {
1088 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1089 	struct spdk_accel_task *task;
1090 	struct spdk_accel_sequence *seq = *pseq;
1091 
1092 	if (seq == NULL) {
1093 		seq = accel_sequence_get(accel_ch);
1094 		if (spdk_unlikely(seq == NULL)) {
1095 			return -ENOMEM;
1096 		}
1097 	}
1098 
1099 	assert(seq->ch == accel_ch);
1100 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1101 	if (spdk_unlikely(task == NULL)) {
1102 		if (*pseq == NULL) {
1103 			accel_sequence_put(seq);
1104 		}
1105 
1106 		return -ENOMEM;
1107 	}
1108 
1109 	memset(&task->fill_pattern, pattern, sizeof(uint64_t));
1110 
1111 	task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
1112 	if (spdk_unlikely(!task->aux)) {
1113 		SPDK_ERRLOG("Fatal problem, aux data was not allocated\n");
1114 		if (*pseq == NULL) {
1115 			accel_sequence_put((seq));
1116 		}
1117 
1118 		task->seq = NULL;
1119 		_put_task(task->accel_ch, task);
1120 		assert(0);
1121 		return -ENOMEM;
1122 	}
1123 	SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
1124 	task->has_aux = true;
1125 
1126 	task->d.iovs = &task->aux->iovs[SPDK_ACCEL_AUX_IOV_DST];
1127 	task->d.iovs[0].iov_base = buf;
1128 	task->d.iovs[0].iov_len = len;
1129 	task->d.iovcnt = 1;
1130 	task->nbytes = len;
1131 	task->src_domain = NULL;
1132 	task->dst_domain = domain;
1133 	task->dst_domain_ctx = domain_ctx;
1134 	task->op_code = SPDK_ACCEL_OPC_FILL;
1135 
1136 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1137 	*pseq = seq;
1138 
1139 	return 0;
1140 }
1141 
1142 int
1143 spdk_accel_append_decompress(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1144 			     struct iovec *dst_iovs, size_t dst_iovcnt,
1145 			     struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1146 			     struct iovec *src_iovs, size_t src_iovcnt,
1147 			     struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1148 			     spdk_accel_step_cb cb_fn, void *cb_arg)
1149 {
1150 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1151 	struct spdk_accel_task *task;
1152 	struct spdk_accel_sequence *seq = *pseq;
1153 
1154 	if (seq == NULL) {
1155 		seq = accel_sequence_get(accel_ch);
1156 		if (spdk_unlikely(seq == NULL)) {
1157 			return -ENOMEM;
1158 		}
1159 	}
1160 
1161 	assert(seq->ch == accel_ch);
1162 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1163 	if (spdk_unlikely(task == NULL)) {
1164 		if (*pseq == NULL) {
1165 			accel_sequence_put(seq);
1166 		}
1167 
1168 		return -ENOMEM;
1169 	}
1170 
1171 	/* TODO: support output_size for chaining */
1172 	task->output_size = NULL;
1173 	task->dst_domain = dst_domain;
1174 	task->dst_domain_ctx = dst_domain_ctx;
1175 	task->d.iovs = dst_iovs;
1176 	task->d.iovcnt = dst_iovcnt;
1177 	task->src_domain = src_domain;
1178 	task->src_domain_ctx = src_domain_ctx;
1179 	task->s.iovs = src_iovs;
1180 	task->s.iovcnt = src_iovcnt;
1181 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1182 	task->op_code = SPDK_ACCEL_OPC_DECOMPRESS;
1183 
1184 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1185 	*pseq = seq;
1186 
1187 	return 0;
1188 }
1189 
1190 int
1191 spdk_accel_append_encrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1192 			  struct spdk_accel_crypto_key *key,
1193 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
1194 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1195 			  struct iovec *src_iovs, uint32_t src_iovcnt,
1196 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1197 			  uint64_t iv, uint32_t block_size,
1198 			  spdk_accel_step_cb cb_fn, void *cb_arg)
1199 {
1200 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1201 	struct spdk_accel_task *task;
1202 	struct spdk_accel_sequence *seq = *pseq;
1203 
1204 	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);
1205 
1206 	if (seq == NULL) {
1207 		seq = accel_sequence_get(accel_ch);
1208 		if (spdk_unlikely(seq == NULL)) {
1209 			return -ENOMEM;
1210 		}
1211 	}
1212 
1213 	assert(seq->ch == accel_ch);
1214 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1215 	if (spdk_unlikely(task == NULL)) {
1216 		if (*pseq == NULL) {
1217 			accel_sequence_put(seq);
1218 		}
1219 
1220 		return -ENOMEM;
1221 	}
1222 
1223 	task->crypto_key = key;
1224 	task->src_domain = src_domain;
1225 	task->src_domain_ctx = src_domain_ctx;
1226 	task->s.iovs = src_iovs;
1227 	task->s.iovcnt = src_iovcnt;
1228 	task->dst_domain = dst_domain;
1229 	task->dst_domain_ctx = dst_domain_ctx;
1230 	task->d.iovs = dst_iovs;
1231 	task->d.iovcnt = dst_iovcnt;
1232 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1233 	task->iv = iv;
1234 	task->block_size = block_size;
1235 	task->op_code = SPDK_ACCEL_OPC_ENCRYPT;
1236 
1237 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1238 	*pseq = seq;
1239 
1240 	return 0;
1241 }
1242 
1243 int
1244 spdk_accel_append_decrypt(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1245 			  struct spdk_accel_crypto_key *key,
1246 			  struct iovec *dst_iovs, uint32_t dst_iovcnt,
1247 			  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1248 			  struct iovec *src_iovs, uint32_t src_iovcnt,
1249 			  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1250 			  uint64_t iv, uint32_t block_size,
1251 			  spdk_accel_step_cb cb_fn, void *cb_arg)
1252 {
1253 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1254 	struct spdk_accel_task *task;
1255 	struct spdk_accel_sequence *seq = *pseq;
1256 
1257 	assert(dst_iovs && dst_iovcnt && src_iovs && src_iovcnt && key && block_size);
1258 
1259 	if (seq == NULL) {
1260 		seq = accel_sequence_get(accel_ch);
1261 		if (spdk_unlikely(seq == NULL)) {
1262 			return -ENOMEM;
1263 		}
1264 	}
1265 
1266 	assert(seq->ch == accel_ch);
1267 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1268 	if (spdk_unlikely(task == NULL)) {
1269 		if (*pseq == NULL) {
1270 			accel_sequence_put(seq);
1271 		}
1272 
1273 		return -ENOMEM;
1274 	}
1275 
1276 	task->crypto_key = key;
1277 	task->src_domain = src_domain;
1278 	task->src_domain_ctx = src_domain_ctx;
1279 	task->s.iovs = src_iovs;
1280 	task->s.iovcnt = src_iovcnt;
1281 	task->dst_domain = dst_domain;
1282 	task->dst_domain_ctx = dst_domain_ctx;
1283 	task->d.iovs = dst_iovs;
1284 	task->d.iovcnt = dst_iovcnt;
1285 	task->nbytes = accel_get_iovlen(src_iovs, src_iovcnt);
1286 	task->iv = iv;
1287 	task->block_size = block_size;
1288 	task->op_code = SPDK_ACCEL_OPC_DECRYPT;
1289 
1290 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1291 	*pseq = seq;
1292 
1293 	return 0;
1294 }
1295 
1296 int
1297 spdk_accel_append_crc32c(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1298 			 uint32_t *dst, struct iovec *iovs, uint32_t iovcnt,
1299 			 struct spdk_memory_domain *domain, void *domain_ctx,
1300 			 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg)
1301 {
1302 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1303 	struct spdk_accel_task *task;
1304 	struct spdk_accel_sequence *seq = *pseq;
1305 
1306 	if (seq == NULL) {
1307 		seq = accel_sequence_get(accel_ch);
1308 		if (spdk_unlikely(seq == NULL)) {
1309 			return -ENOMEM;
1310 		}
1311 	}
1312 
1313 	assert(seq->ch == accel_ch);
1314 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1315 	if (spdk_unlikely(task == NULL)) {
1316 		if (*pseq == NULL) {
1317 			accel_sequence_put(seq);
1318 		}
1319 
1320 		return -ENOMEM;
1321 	}
1322 
1323 	task->s.iovs = iovs;
1324 	task->s.iovcnt = iovcnt;
1325 	task->src_domain = domain;
1326 	task->src_domain_ctx = domain_ctx;
1327 	task->nbytes = accel_get_iovlen(iovs, iovcnt);
1328 	task->crc_dst = dst;
1329 	task->seed = seed;
1330 	task->op_code = SPDK_ACCEL_OPC_CRC32C;
1331 	task->dst_domain = NULL;
1332 
1333 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1334 	*pseq = seq;
1335 
1336 	return 0;
1337 }
1338 
1339 int
1340 spdk_accel_append_dif_verify(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1341 			     struct iovec *iovs, size_t iovcnt,
1342 			     struct spdk_memory_domain *domain, void *domain_ctx,
1343 			     uint32_t num_blocks,
1344 			     const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
1345 			     spdk_accel_step_cb cb_fn, void *cb_arg)
1346 {
1347 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1348 	struct spdk_accel_task *task;
1349 	struct spdk_accel_sequence *seq = *pseq;
1350 
1351 	if (seq == NULL) {
1352 		seq = accel_sequence_get(accel_ch);
1353 		if (spdk_unlikely(seq == NULL)) {
1354 			return -ENOMEM;
1355 		}
1356 	}
1357 
1358 	assert(seq->ch == accel_ch);
1359 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1360 	if (spdk_unlikely(task == NULL)) {
1361 		if (*pseq == NULL) {
1362 			accel_sequence_put(seq);
1363 		}
1364 
1365 		return -ENOMEM;
1366 	}
1367 
1368 	task->s.iovs = iovs;
1369 	task->s.iovcnt = iovcnt;
1370 	task->src_domain = domain;
1371 	task->src_domain_ctx = domain_ctx;
1372 	task->dst_domain = NULL;
1373 	task->dif.ctx = ctx;
1374 	task->dif.err = err;
1375 	task->dif.num_blocks = num_blocks;
1376 	task->nbytes = num_blocks * ctx->block_size;
1377 	task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY;
1378 
1379 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1380 	*pseq = seq;
1381 
1382 	return 0;
1383 }
1384 
1385 int
1386 spdk_accel_append_dif_verify_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1387 				  struct iovec *dst_iovs, size_t dst_iovcnt,
1388 				  struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1389 				  struct iovec *src_iovs, size_t src_iovcnt,
1390 				  struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1391 				  uint32_t num_blocks,
1392 				  const struct spdk_dif_ctx *ctx, struct spdk_dif_error *err,
1393 				  spdk_accel_step_cb cb_fn, void *cb_arg)
1394 {
1395 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1396 	struct spdk_accel_task *task;
1397 	struct spdk_accel_sequence *seq = *pseq;
1398 
1399 	if (seq == NULL) {
1400 		seq = accel_sequence_get(accel_ch);
1401 		if (spdk_unlikely(seq == NULL)) {
1402 			return -ENOMEM;
1403 		}
1404 	}
1405 
1406 	assert(seq->ch == accel_ch);
1407 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1408 	if (spdk_unlikely(task == NULL)) {
1409 		if (*pseq == NULL) {
1410 			accel_sequence_put(seq);
1411 		}
1412 
1413 		return -ENOMEM;
1414 	}
1415 
1416 	task->dst_domain = dst_domain;
1417 	task->dst_domain_ctx = dst_domain_ctx;
1418 	task->d.iovs = dst_iovs;
1419 	task->d.iovcnt = dst_iovcnt;
1420 	task->src_domain = src_domain;
1421 	task->src_domain_ctx = src_domain_ctx;
1422 	task->s.iovs = src_iovs;
1423 	task->s.iovcnt = src_iovcnt;
1424 	task->dif.ctx = ctx;
1425 	task->dif.err = err;
1426 	task->dif.num_blocks = num_blocks;
1427 	task->nbytes = num_blocks * ctx->block_size;
1428 	task->op_code = SPDK_ACCEL_OPC_DIF_VERIFY_COPY;
1429 
1430 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1431 	*pseq = seq;
1432 
1433 	return 0;
1434 }
1435 
1436 int
1437 spdk_accel_append_dif_generate(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1438 			       struct iovec *iovs, size_t iovcnt,
1439 			       struct spdk_memory_domain *domain, void *domain_ctx,
1440 			       uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1441 			       spdk_accel_step_cb cb_fn, void *cb_arg)
1442 {
1443 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1444 	struct spdk_accel_task *task;
1445 	struct spdk_accel_sequence *seq = *pseq;
1446 
1447 	if (seq == NULL) {
1448 		seq = accel_sequence_get(accel_ch);
1449 		if (spdk_unlikely(seq == NULL)) {
1450 			return -ENOMEM;
1451 		}
1452 	}
1453 
1454 	assert(seq->ch == accel_ch);
1455 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1456 	if (spdk_unlikely(task == NULL)) {
1457 		if (*pseq == NULL) {
1458 			accel_sequence_put(seq);
1459 		}
1460 
1461 		return -ENOMEM;
1462 	}
1463 
1464 	task->s.iovs = iovs;
1465 	task->s.iovcnt = iovcnt;
1466 	task->src_domain = domain;
1467 	task->src_domain_ctx = domain_ctx;
1468 	task->dst_domain = NULL;
1469 	task->dif.ctx = ctx;
1470 	task->dif.num_blocks = num_blocks;
1471 	task->nbytes = num_blocks * ctx->block_size;
1472 	task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE;
1473 
1474 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1475 	*pseq = seq;
1476 
1477 	return 0;
1478 }
1479 
1480 int
1481 spdk_accel_append_dif_generate_copy(struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch,
1482 				    struct iovec *dst_iovs, size_t dst_iovcnt,
1483 				    struct spdk_memory_domain *dst_domain, void *dst_domain_ctx,
1484 				    struct iovec *src_iovs, size_t src_iovcnt,
1485 				    struct spdk_memory_domain *src_domain, void *src_domain_ctx,
1486 				    uint32_t num_blocks, const struct spdk_dif_ctx *ctx,
1487 				    spdk_accel_step_cb cb_fn, void *cb_arg)
1488 {
1489 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1490 	struct spdk_accel_task *task;
1491 	struct spdk_accel_sequence *seq = *pseq;
1492 
1493 	if (seq == NULL) {
1494 		seq = accel_sequence_get(accel_ch);
1495 		if (spdk_unlikely(seq == NULL)) {
1496 			return -ENOMEM;
1497 		}
1498 	}
1499 
1500 	assert(seq->ch == accel_ch);
1501 	task = accel_sequence_get_task(accel_ch, seq, cb_fn, cb_arg);
1502 	if (spdk_unlikely(task == NULL)) {
1503 		if (*pseq == NULL) {
1504 			accel_sequence_put(seq);
1505 		}
1506 
1507 		return -ENOMEM;
1508 	}
1509 
1510 	task->dst_domain = dst_domain;
1511 	task->dst_domain_ctx = dst_domain_ctx;
1512 	task->d.iovs = dst_iovs;
1513 	task->d.iovcnt = dst_iovcnt;
1514 	task->src_domain = src_domain;
1515 	task->src_domain_ctx = src_domain_ctx;
1516 	task->s.iovs = src_iovs;
1517 	task->s.iovcnt = src_iovcnt;
1518 	task->nbytes = num_blocks * ctx->block_size;
1519 	task->op_code = SPDK_ACCEL_OPC_DIF_GENERATE_COPY;
1520 
1521 	TAILQ_INSERT_TAIL(&seq->tasks, task, seq_link);
1522 	*pseq = seq;
1523 
1524 	return 0;
1525 }
1526 
1527 int
1528 spdk_accel_get_buf(struct spdk_io_channel *ch, uint64_t len, void **buf,
1529 		   struct spdk_memory_domain **domain, void **domain_ctx)
1530 {
1531 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1532 	struct accel_buffer *accel_buf;
1533 
1534 	accel_buf = accel_get_buf(accel_ch, len);
1535 	if (spdk_unlikely(accel_buf == NULL)) {
1536 		return -ENOMEM;
1537 	}
1538 
1539 	accel_buf->ch = accel_ch;
1540 
1541 	/* We always return the same pointer and identify the buffers through domain_ctx */
1542 	*buf = ACCEL_BUFFER_BASE;
1543 	*domain_ctx = accel_buf;
1544 	*domain = g_accel_domain;
1545 
1546 	return 0;
1547 }
1548 
1549 void
1550 spdk_accel_put_buf(struct spdk_io_channel *ch, void *buf,
1551 		   struct spdk_memory_domain *domain, void *domain_ctx)
1552 {
1553 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
1554 	struct accel_buffer *accel_buf = domain_ctx;
1555 
1556 	assert(domain == g_accel_domain);
1557 	assert(buf == ACCEL_BUFFER_BASE);
1558 
1559 	accel_put_buf(accel_ch, accel_buf);
1560 }
1561 
/* Detach a task from its sequence, recycle the task (and its aux entry) back
 * to the channel pools, and finally invoke the user's per-step callback. */
static void
accel_sequence_complete_task(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_io_channel *ch = seq->ch;
	spdk_accel_step_cb cb_fn;
	void *cb_arg;

	TAILQ_REMOVE(&seq->tasks, task, seq_link);
	/* Cache the callback before recycling: after _put_task() the task may
	 * be reused, so its fields must not be touched. */
	cb_fn = task->step_cb_fn;
	cb_arg = task->cb_arg;
	task->seq = NULL;
	if (task->has_aux) {
		SLIST_INSERT_HEAD(&ch->task_aux_data_pool, task->aux, link);
		task->aux = NULL;
		task->has_aux = false;
	}

	_put_task(ch, task);

	if (cb_fn != NULL) {
		cb_fn(cb_arg);
	}
}
1585 
1586 static void
1587 accel_sequence_complete_tasks(struct spdk_accel_sequence *seq)
1588 {
1589 	struct spdk_accel_task *task;
1590 
1591 	while (!TAILQ_EMPTY(&seq->tasks)) {
1592 		task = TAILQ_FIRST(&seq->tasks);
1593 		accel_sequence_complete_task(seq, task);
1594 	}
1595 }
1596 
/* Finish a sequence: update stats, release all tasks and the sequence object
 * itself, then notify the user who finished (submitted) the sequence. */
static void
accel_sequence_complete(struct spdk_accel_sequence *seq)
{
	/* Cache the completion info up front: accel_sequence_put() below
	 * recycles seq, after which its fields must not be read. */
	spdk_accel_completion_cb cb_fn = seq->cb_fn;
	void *cb_arg = seq->cb_arg;
	int status = seq->status;

	SPDK_DEBUGLOG(accel, "Completed sequence: %p with status: %d\n", seq, status);

	accel_update_stats(seq->ch, sequence_executed, 1);
	if (spdk_unlikely(status != 0)) {
		accel_update_stats(seq->ch, sequence_failed, 1);
	}

	/* First notify all users that appended operations to this sequence */
	accel_sequence_complete_tasks(seq);
	accel_sequence_put(seq);

	/* Then notify the user that finished the sequence */
	cb_fn(cb_arg, status);
}
1618 
1619 static void
1620 accel_update_virt_iov(struct iovec *diov, struct iovec *siov, struct accel_buffer *accel_buf)
1621 {
1622 	uintptr_t offset;
1623 
1624 	offset = (uintptr_t)siov->iov_base & ACCEL_BUFFER_OFFSET_MASK;
1625 	assert(offset < accel_buf->len);
1626 
1627 	diov->iov_base = (char *)accel_buf->buf + offset;
1628 	diov->iov_len = siov->iov_len;
1629 }
1630 
/* Walk the sequence and rewrite every task iovec that still points into the
 * virtual accel-buffer range of 'buf' so it references the real memory that
 * has now been allocated for it. */
static void
accel_sequence_set_virtbuf(struct spdk_accel_sequence *seq, struct accel_buffer *buf)
{
	struct spdk_accel_task *task;
	struct iovec *iov;

	/* Now that we've allocated the actual data buffer for this accel_buffer, update all tasks
	 * in a sequence that were using it.
	 */
	TAILQ_FOREACH(task, &seq->tasks, seq_link) {
		if (task->src_domain == g_accel_domain && task->src_domain_ctx == buf) {
			if (!task->has_aux) {
				/* Borrow an aux entry to hold the translated iovec. */
				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
				assert(task->aux && "Can't allocate aux data structure");
				task->has_aux = true;
				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
			}

			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_SRC];
			assert(task->s.iovcnt == 1);
			accel_update_virt_iov(iov, &task->s.iovs[0], buf);
			/* Buffer is now in local memory; domain translation no longer needed. */
			task->src_domain = NULL;
			task->s.iovs = iov;
		}
		if (task->dst_domain == g_accel_domain && task->dst_domain_ctx == buf) {
			if (!task->has_aux) {
				task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
				assert(task->aux && "Can't allocate aux data structure");
				task->has_aux = true;
				SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
			}

			iov = &task->aux->iovs[SPDK_ACCEL_AXU_IOV_VIRT_DST];
			assert(task->d.iovcnt == 1);
			accel_update_virt_iov(iov, &task->d.iovs[0], buf);
			task->dst_domain = NULL;
			task->d.iovs = iov;
		}
	}
}
1671 
1672 static void accel_process_sequence(struct spdk_accel_sequence *seq);
1673 
/* iobuf callback: a data buffer became available for an accel_buffer whose
 * allocation previously went asynchronous, parking the sequence in
 * AWAIT_VIRTBUF. Attach the buffer, patch the tasks that reference it and
 * resume the sequence state machine. */
static void
accel_iobuf_get_virtbuf_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);

	assert(accel_buf->seq != NULL);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
	accel_process_sequence(accel_buf->seq);
}
1690 
1691 static bool
1692 accel_sequence_alloc_buf(struct spdk_accel_sequence *seq, struct accel_buffer *buf,
1693 			 spdk_iobuf_get_cb cb_fn)
1694 {
1695 	struct accel_io_channel *ch = seq->ch;
1696 
1697 	assert(buf->seq == NULL);
1698 
1699 	buf->seq = seq;
1700 
1701 	/* Buffer might be already allocated by memory domain translation. */
1702 	if (buf->buf) {
1703 		return true;
1704 	}
1705 
1706 	buf->buf = spdk_iobuf_get(&ch->iobuf, buf->len, &buf->iobuf, cb_fn);
1707 	if (spdk_unlikely(buf->buf == NULL)) {
1708 		accel_update_stats(ch, retry.iobuf, 1);
1709 		return false;
1710 	}
1711 
1712 	return true;
1713 }
1714 
/* Ensure any accel-domain (virtual) buffers referenced by this task have real
 * memory behind them. Returns false when an allocation went asynchronous; in
 * that case accel_iobuf_get_virtbuf_cb() resumes the sequence later. */
static bool
accel_sequence_check_virtbuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	/* If a task doesn't have dst/src (e.g. fill, crc32), its dst/src domain should be set to
	 * NULL */
	if (task->src_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->src_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->src_domain_ctx);
	}

	if (task->dst_domain == g_accel_domain) {
		if (!accel_sequence_alloc_buf(seq, task->dst_domain_ctx,
					      accel_iobuf_get_virtbuf_cb)) {
			return false;
		}

		accel_sequence_set_virtbuf(seq, task->dst_domain_ctx);
	}

	return true;
}
1740 
1741 static void
1742 accel_sequence_get_buf_cb(struct spdk_iobuf_entry *entry, void *buf)
1743 {
1744 	struct accel_buffer *accel_buf;
1745 
1746 	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
1747 
1748 	assert(accel_buf->seq != NULL);
1749 	assert(accel_buf->buf == NULL);
1750 	accel_buf->buf = buf;
1751 
1752 	accel_sequence_set_virtbuf(accel_buf->seq, accel_buf);
1753 	accel_buf->cb_fn(accel_buf->seq, accel_buf->cb_ctx);
1754 }
1755 
1756 bool
1757 spdk_accel_alloc_sequence_buf(struct spdk_accel_sequence *seq, void *buf,
1758 			      struct spdk_memory_domain *domain, void *domain_ctx,
1759 			      spdk_accel_sequence_get_buf_cb cb_fn, void *cb_ctx)
1760 {
1761 	struct accel_buffer *accel_buf = domain_ctx;
1762 
1763 	assert(domain == g_accel_domain);
1764 	accel_buf->cb_fn = cb_fn;
1765 	accel_buf->cb_ctx = cb_ctx;
1766 
1767 	if (!accel_sequence_alloc_buf(seq, accel_buf, accel_sequence_get_buf_cb)) {
1768 		return false;
1769 	}
1770 
1771 	accel_sequence_set_virtbuf(seq, accel_buf);
1772 
1773 	return true;
1774 }
1775 
/* Return the first task in a sequence (head of its task list), or NULL. */
struct spdk_accel_task *
spdk_accel_sequence_first_task(struct spdk_accel_sequence *seq)
{
	return TAILQ_FIRST(&seq->tasks);
}
1781 
/* Return the task following 'task' in its sequence, or NULL at the end. */
struct spdk_accel_task *
spdk_accel_sequence_next_task(struct spdk_accel_task *task)
{
	return TAILQ_NEXT(task, seq_link);
}
1787 
1788 static inline void
1789 accel_set_bounce_buffer(struct spdk_accel_bounce_buffer *bounce, struct iovec **iovs,
1790 			uint32_t *iovcnt, struct spdk_memory_domain **domain, void **domain_ctx,
1791 			struct accel_buffer *buf)
1792 {
1793 	bounce->orig_iovs = *iovs;
1794 	bounce->orig_iovcnt = *iovcnt;
1795 	bounce->orig_domain = *domain;
1796 	bounce->orig_domain_ctx = *domain_ctx;
1797 	bounce->iov.iov_base = buf->buf;
1798 	bounce->iov.iov_len = buf->len;
1799 
1800 	*iovs = &bounce->iov;
1801 	*iovcnt = 1;
1802 	*domain = NULL;
1803 }
1804 
/* iobuf callback: a source bounce buffer became available while the sequence
 * was parked in AWAIT_BOUNCEBUF. Attach it to the current task and resume
 * the sequence state machine. */
static void
accel_iobuf_get_src_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	/* Bounce buffers are only allocated for the task at the head of the queue. */
	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	assert(task->aux);
	assert(task->has_aux);
	accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt, &task->src_domain,
				&task->src_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}
1826 
/* iobuf callback: a destination bounce buffer became available while the
 * sequence was parked in AWAIT_BOUNCEBUF. Attach it to the current task and
 * resume the sequence state machine. */
static void
accel_iobuf_get_dst_bounce_cb(struct spdk_iobuf_entry *entry, void *buf)
{
	struct spdk_accel_task *task;
	struct accel_buffer *accel_buf;

	accel_buf = SPDK_CONTAINEROF(entry, struct accel_buffer, iobuf);
	assert(accel_buf->buf == NULL);
	accel_buf->buf = buf;

	/* Bounce buffers are only allocated for the task at the head of the queue. */
	task = TAILQ_FIRST(&accel_buf->seq->tasks);
	assert(task != NULL);

	assert(accel_buf->seq->state == ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
	accel_sequence_set_state(accel_buf->seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
	assert(task->aux);
	assert(task->has_aux);
	accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt, &task->dst_domain,
				&task->dst_domain_ctx, accel_buf);
	accel_process_sequence(accel_buf->seq);
}
1848 
/* Allocate local bounce buffers for a task whose src/dst data lives in a
 * foreign memory domain that the selected module cannot access directly, and
 * rewire the task's iovecs to point at them.
 *
 * Returns 0 on success, -ENOMEM if no buffer descriptor is available, or
 * -EAGAIN if an iobuf allocation went asynchronous (the registered callback
 * resumes the sequence once memory becomes available). */
static int
accel_sequence_check_bouncebuf(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
{
	struct accel_buffer *buf;

	if (task->src_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->src_domain != g_accel_domain);

		if (!task->has_aux) {
			/* Borrow an aux entry to hold the bounce-buffer state. */
			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
			if (spdk_unlikely(!task->aux)) {
				SPDK_ERRLOG("Can't allocate aux data structure\n");
				assert(0);
				return -EAGAIN;
			}
			task->has_aux = true;
			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
		}
		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->s.iovs, task->s.iovcnt));
		if (buf == NULL) {
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		/* Track the buffer on the sequence so it's released at completion. */
		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_src_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->aux->bounce.s, &task->s.iovs, &task->s.iovcnt,
					&task->src_domain, &task->src_domain_ctx, buf);
	}

	if (task->dst_domain != NULL) {
		/* By the time we're here, accel buffers should have been allocated */
		assert(task->dst_domain != g_accel_domain);

		if (!task->has_aux) {
			task->aux = SLIST_FIRST(&task->accel_ch->task_aux_data_pool);
			if (spdk_unlikely(!task->aux)) {
				SPDK_ERRLOG("Can't allocate aux data structure\n");
				assert(0);
				return -EAGAIN;
			}
			task->has_aux = true;
			SLIST_REMOVE_HEAD(&task->accel_ch->task_aux_data_pool, link);
		}
		buf = accel_get_buf(seq->ch, accel_get_iovlen(task->d.iovs, task->d.iovcnt));
		if (buf == NULL) {
			/* The src buffer will be released when a sequence is completed */
			SPDK_ERRLOG("Couldn't allocate buffer descriptor\n");
			return -ENOMEM;
		}

		SLIST_INSERT_HEAD(&seq->bounce_bufs, buf, link);
		if (!accel_sequence_alloc_buf(seq, buf, accel_iobuf_get_dst_bounce_cb)) {
			return -EAGAIN;
		}

		accel_set_bounce_buffer(&task->aux->bounce.d, &task->d.iovs, &task->d.iovcnt,
					&task->dst_domain, &task->dst_domain_ctx, buf);
	}

	return 0;
}
1915 
1916 static void
1917 accel_task_pull_data_cb(void *ctx, int status)
1918 {
1919 	struct spdk_accel_sequence *seq = ctx;
1920 
1921 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
1922 	if (spdk_likely(status == 0)) {
1923 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
1924 	} else {
1925 		accel_sequence_set_fail(seq, status);
1926 	}
1927 
1928 	accel_process_sequence(seq);
1929 }
1930 
1931 static void
1932 accel_task_pull_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1933 {
1934 	int rc;
1935 
1936 	assert(task->has_aux);
1937 	assert(task->aux);
1938 	assert(task->aux->bounce.s.orig_iovs != NULL);
1939 	assert(task->aux->bounce.s.orig_domain != NULL);
1940 	assert(task->aux->bounce.s.orig_domain != g_accel_domain);
1941 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
1942 
1943 	rc = spdk_memory_domain_pull_data(task->aux->bounce.s.orig_domain,
1944 					  task->aux->bounce.s.orig_domain_ctx,
1945 					  task->aux->bounce.s.orig_iovs, task->aux->bounce.s.orig_iovcnt,
1946 					  task->s.iovs, task->s.iovcnt,
1947 					  accel_task_pull_data_cb, seq);
1948 	if (spdk_unlikely(rc != 0)) {
1949 		SPDK_ERRLOG("Failed to pull data from memory domain: %s, rc: %d\n",
1950 			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc);
1951 		accel_sequence_set_fail(seq, rc);
1952 	}
1953 }
1954 
1955 static void
1956 accel_task_push_data_cb(void *ctx, int status)
1957 {
1958 	struct spdk_accel_sequence *seq = ctx;
1959 
1960 	assert(seq->state == ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
1961 	if (spdk_likely(status == 0)) {
1962 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
1963 	} else {
1964 		accel_sequence_set_fail(seq, status);
1965 	}
1966 
1967 	accel_process_sequence(seq);
1968 }
1969 
1970 static void
1971 accel_task_push_data(struct spdk_accel_sequence *seq, struct spdk_accel_task *task)
1972 {
1973 	int rc;
1974 
1975 	assert(task->has_aux);
1976 	assert(task->aux);
1977 	assert(task->aux->bounce.d.orig_iovs != NULL);
1978 	assert(task->aux->bounce.d.orig_domain != NULL);
1979 	assert(task->aux->bounce.d.orig_domain != g_accel_domain);
1980 	assert(!g_modules_opc[task->op_code].supports_memory_domains);
1981 
1982 	rc = spdk_memory_domain_push_data(task->aux->bounce.d.orig_domain,
1983 					  task->aux->bounce.d.orig_domain_ctx,
1984 					  task->aux->bounce.d.orig_iovs, task->aux->bounce.d.orig_iovcnt,
1985 					  task->d.iovs, task->d.iovcnt,
1986 					  accel_task_push_data_cb, seq);
1987 	if (spdk_unlikely(rc != 0)) {
1988 		SPDK_ERRLOG("Failed to push data to memory domain: %s, rc: %d\n",
1989 			    spdk_memory_domain_get_dma_device_id(task->aux->bounce.s.orig_domain), rc);
1990 		accel_sequence_set_fail(seq, rc);
1991 	}
1992 }
1993 
/* Drive a sequence's state machine until it completes, fails, or must wait for
 * an asynchronous event (buffer allocation, pull/push of data, task execution,
 * or a driver).  Completion callbacks re-enter this function to resume; the
 * in_process_sequence flag turns such re-entry into a state change picked up
 * by the loop below instead of actual recursion. */
static void
accel_process_sequence(struct spdk_accel_sequence *seq)
{
	struct accel_io_channel *accel_ch = seq->ch;
	struct spdk_accel_task *task;
	enum accel_sequence_state state;
	int rc;

	/* Prevent recursive calls to this function */
	if (spdk_unlikely(seq->in_process_sequence)) {
		return;
	}
	seq->in_process_sequence = true;

	task = TAILQ_FIRST(&seq->tasks);
	do {
		/* Loop as long as each iteration changes seq->state; the AWAIT_*
		 * states leave it unchanged, ending the loop until a callback
		 * advances the state and calls us again. */
		state = seq->state;
		switch (state) {
		case ACCEL_SEQUENCE_STATE_INIT:
			if (g_accel_driver != NULL) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS);
				break;
			}
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF);
			if (!accel_sequence_check_virtbuf(seq, task)) {
				/* We couldn't allocate a buffer, wait until one is available */
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_CHECK_BOUNCEBUF:
			/* If a module supports memory domains, we don't need to allocate bounce
			 * buffers */
			if (g_modules_opc[task->op_code].supports_memory_domains) {
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF);
			rc = accel_sequence_check_bouncebuf(seq, task);
			if (spdk_unlikely(rc != 0)) {
				/* We couldn't allocate a buffer, wait until one is available */
				if (rc == -EAGAIN) {
					break;
				}
				accel_sequence_set_fail(seq, rc);
				break;
			}
			/* If the src iovec now points at the bounce buffer, the caller's
			 * data must first be pulled into it before executing the task. */
			if (task->has_aux && task->s.iovs == &task->aux->bounce.s.iov) {
				assert(task->aux->bounce.s.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PULL_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_EXEC_TASK);
		/* Fall through */
		case ACCEL_SEQUENCE_STATE_EXEC_TASK:
			SPDK_DEBUGLOG(accel, "Executing %s operation, sequence: %p\n",
				      g_opcode_strings[task->op_code], seq);

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_TASK);
			rc = accel_submit_task(accel_ch, task);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to submit %s operation, sequence: %p\n",
					    g_opcode_strings[task->op_code], seq);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_PULL_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA);
			accel_task_pull_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_COMPLETE_TASK:
			/* If the dst iovec points at a bounce buffer, the result must be
			 * pushed back to the caller's memory domain first. */
			if (task->has_aux && task->d.iovs == &task->aux->bounce.d.iov) {
				assert(task->aux->bounce.d.orig_iovs);
				accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_PUSH_DATA);
				break;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_NEXT_TASK);
			break;
		case ACCEL_SEQUENCE_STATE_PUSH_DATA:
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA);
			accel_task_push_data(seq, task);
			break;
		case ACCEL_SEQUENCE_STATE_NEXT_TASK:
			accel_sequence_complete_task(seq, task);
			/* Check if there are any remaining tasks */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_INIT);
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_EXEC_TASKS:
			assert(!TAILQ_EMPTY(&seq->tasks));

			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
			rc = g_accel_driver->execute_sequence(accel_ch->driver_channel, seq);
			if (spdk_unlikely(rc != 0)) {
				SPDK_ERRLOG("Failed to execute sequence: %p using driver: %s\n",
					    seq, g_accel_driver->name);
				accel_sequence_set_fail(seq, rc);
			}
			break;
		case ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS:
			/* Get the task again, as the driver might have completed some tasks
			 * synchronously */
			task = TAILQ_FIRST(&seq->tasks);
			if (task == NULL) {
				/* Immediately return here to make sure we don't touch the sequence
				 * after it's completed */
				accel_sequence_complete(seq);
				return;
			}
			/* We don't want to execute the next task through the driver, so we
			 * explicitly omit the INIT state here */
			accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_CHECK_VIRTBUF);
			break;
		case ACCEL_SEQUENCE_STATE_ERROR:
			/* Immediately return here to make sure we don't touch the sequence
			 * after it's completed */
			assert(seq->status != 0);
			accel_sequence_complete(seq);
			return;
		case ACCEL_SEQUENCE_STATE_AWAIT_VIRTBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_BOUNCEBUF:
		case ACCEL_SEQUENCE_STATE_AWAIT_PULL_DATA:
		case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		case ACCEL_SEQUENCE_STATE_AWAIT_PUSH_DATA:
		case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
			/* Waiting on an async event; the matching callback resumes us */
			break;
		default:
			assert(0 && "bad state");
			break;
		}
	} while (seq->state != state);

	seq->in_process_sequence = false;
}
2136 
/* Completion callback for a task belonging to a sequence.  In normal module
 * execution (AWAIT_TASK) the sequence advances to COMPLETE_TASK and is
 * processed immediately.  For driver-executed tasks (DRIVER_AWAIT_TASKS) the
 * task is retired here, but the sequence is only resumed once the driver calls
 * spdk_accel_sequence_continue(). */
static void
accel_sequence_task_cb(void *cb_arg, int status)
{
	struct spdk_accel_sequence *seq = cb_arg;
	struct spdk_accel_task *task = TAILQ_FIRST(&seq->tasks);

	switch (seq->state) {
	case ACCEL_SEQUENCE_STATE_AWAIT_TASK:
		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_COMPLETE_TASK);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p\n",
				    g_opcode_strings[task->op_code], seq);
			accel_sequence_set_fail(seq, status);
		}

		accel_process_sequence(seq);
		break;
	case ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS:
		assert(g_accel_driver != NULL);
		/* Immediately remove the task from the outstanding list to make sure the next call
		 * to spdk_accel_sequence_first_task() doesn't return it */
		accel_sequence_complete_task(seq, task);
		if (spdk_unlikely(status != 0)) {
			SPDK_ERRLOG("Failed to execute %s operation, sequence: %p through "
				    "driver: %s\n", g_opcode_strings[task->op_code], seq,
				    g_accel_driver->name);
			/* Update status without using accel_sequence_set_fail() to avoid changing
			 * seq's state to ERROR until driver calls spdk_accel_sequence_continue() */
			seq->status = status;
		}
		break;
	default:
		assert(0 && "bad state");
		break;
	}
}
2173 
2174 void
2175 spdk_accel_sequence_continue(struct spdk_accel_sequence *seq)
2176 {
2177 	assert(g_accel_driver != NULL);
2178 	assert(seq->state == ACCEL_SEQUENCE_STATE_DRIVER_AWAIT_TASKS);
2179 
2180 	if (spdk_likely(seq->status == 0)) {
2181 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_DRIVER_COMPLETE_TASKS);
2182 	} else {
2183 		accel_sequence_set_state(seq, ACCEL_SEQUENCE_STATE_ERROR);
2184 	}
2185 
2186 	accel_process_sequence(seq);
2187 }
2188 
/* Check whether two iovec arrays describe exactly the same buffers.  This is
 * a shallow, element-wise comparison of the iovec entries (base + length),
 * not of the memory they point to. */
static bool
accel_compare_iovs(struct iovec *iova, uint32_t iovacnt, struct iovec *iovb, uint32_t iovbcnt)
{
	if (iovacnt != iovbcnt) {
		return false;
	}

	return memcmp(iova, iovb, iovacnt * sizeof(struct iovec)) == 0;
}
2199 
/* Make `task` write directly into the destination of `next` (a copy that
 * consumes task's output), so that the copy can be elided.  Returns true when
 * the substitution was performed.  Requires task's current output to exactly
 * match next's input: same iovecs and same memory domain. */
static bool
accel_task_set_dstbuf(struct spdk_accel_task *task, struct spdk_accel_task *next)
{
	struct spdk_accel_task *prev;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
	case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
		if (task->dst_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		/* Redirect task's output to the copy's destination */
		task->d.iovs = next->d.iovs;
		task->d.iovcnt = next->d.iovcnt;
		task->dst_domain = next->dst_domain;
		task->dst_domain_ctx = next->dst_domain_ctx;
		break;
	case SPDK_ACCEL_OPC_CRC32C:
		/* crc32 is special, because it doesn't have a dst buffer */
		if (task->src_domain != next->src_domain) {
			return false;
		}
		if (!accel_compare_iovs(task->s.iovs, task->s.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			return false;
		}
		/* We can only change crc32's buffer if we can change previous task's buffer */
		prev = TAILQ_PREV(task, accel_sequence_tasks, seq_link);
		if (prev == NULL) {
			return false;
		}
		/* Recursively redirect the producer first; crc32 then reads from the
		 * copy's destination, which the producer now writes into. */
		if (!accel_task_set_dstbuf(prev, next)) {
			return false;
		}
		task->s.iovs = next->d.iovs;
		task->s.iovcnt = next->d.iovcnt;
		task->src_domain = next->dst_domain;
		task->src_domain_ctx = next->dst_domain_ctx;
		break;
	default:
		return false;
	}

	return true;
}
2252 
/* Try to collapse a copy at the boundary between `task` and `*next_task`:
 * either a copy feeding a data-processing op (rewrite the consumer's src and
 * drop the copy), or a data-processing op feeding a copy (rewrite the
 * producer's dst and drop the copy).  A dropped task is completed immediately;
 * when *next_task is the one dropped, the pointer is advanced so the caller's
 * iteration stays valid. */
static void
accel_sequence_merge_tasks(struct spdk_accel_sequence *seq, struct spdk_accel_task *task,
			   struct spdk_accel_task **next_task)
{
	struct spdk_accel_task *next = *next_task;

	switch (task->op_code) {
	case SPDK_ACCEL_OPC_COPY:
		/* We only allow changing src of operations that actually have a src, e.g. we never
		 * do it for fill.  Theoretically, it is possible, but we'd have to be careful to
		 * change the src of the operation after fill (which in turn could also be a fill).
		 * So, for the sake of simplicity, skip this type of operations for now.
		 */
		if (next->op_code != SPDK_ACCEL_OPC_DECOMPRESS &&
		    next->op_code != SPDK_ACCEL_OPC_COPY &&
		    next->op_code != SPDK_ACCEL_OPC_ENCRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_DECRYPT &&
		    next->op_code != SPDK_ACCEL_OPC_COPY_CRC32C &&
		    next->op_code != SPDK_ACCEL_OPC_DIF_GENERATE_COPY &&
		    next->op_code != SPDK_ACCEL_OPC_DIF_VERIFY_COPY) {
			break;
		}
		if (task->dst_domain != next->src_domain) {
			break;
		}
		if (!accel_compare_iovs(task->d.iovs, task->d.iovcnt,
					next->s.iovs, next->s.iovcnt)) {
			break;
		}
		/* The copy's output is exactly next's input: read straight from the
		 * copy's source instead and retire the copy. */
		next->s.iovs = task->s.iovs;
		next->s.iovcnt = task->s.iovcnt;
		next->src_domain = task->src_domain;
		next->src_domain_ctx = task->src_domain_ctx;
		accel_sequence_complete_task(seq, task);
		break;
	case SPDK_ACCEL_OPC_DECOMPRESS:
	case SPDK_ACCEL_OPC_FILL:
	case SPDK_ACCEL_OPC_ENCRYPT:
	case SPDK_ACCEL_OPC_DECRYPT:
	case SPDK_ACCEL_OPC_CRC32C:
	case SPDK_ACCEL_OPC_DIF_GENERATE_COPY:
	case SPDK_ACCEL_OPC_DIF_VERIFY_COPY:
		/* We can only merge tasks when one of them is a copy */
		if (next->op_code != SPDK_ACCEL_OPC_COPY) {
			break;
		}
		if (!accel_task_set_dstbuf(task, next)) {
			break;
		}
		/* We're removing next_task from the tasks queue, so we need to update its pointer,
		 * so that the TAILQ_FOREACH_SAFE() loop below works correctly */
		*next_task = TAILQ_NEXT(next, seq_link);
		accel_sequence_complete_task(seq, next);
		break;
	default:
		assert(0 && "bad opcode");
		break;
	}
}
2312 
2313 void
2314 spdk_accel_sequence_finish(struct spdk_accel_sequence *seq,
2315 			   spdk_accel_completion_cb cb_fn, void *cb_arg)
2316 {
2317 	struct spdk_accel_task *task, *next;
2318 
2319 	/* Try to remove any copy operations if possible */
2320 	TAILQ_FOREACH_SAFE(task, &seq->tasks, seq_link, next) {
2321 		if (next == NULL) {
2322 			break;
2323 		}
2324 		accel_sequence_merge_tasks(seq, task, &next);
2325 	}
2326 
2327 	seq->cb_fn = cb_fn;
2328 	seq->cb_arg = cb_arg;
2329 
2330 	accel_process_sequence(seq);
2331 }
2332 
2333 void
2334 spdk_accel_sequence_reverse(struct spdk_accel_sequence *seq)
2335 {
2336 	struct accel_sequence_tasks tasks = TAILQ_HEAD_INITIALIZER(tasks);
2337 	struct spdk_accel_task *task;
2338 
2339 	TAILQ_SWAP(&tasks, &seq->tasks, spdk_accel_task, seq_link);
2340 
2341 	while (!TAILQ_EMPTY(&tasks)) {
2342 		task = TAILQ_FIRST(&tasks);
2343 		TAILQ_REMOVE(&tasks, task, seq_link);
2344 		TAILQ_INSERT_HEAD(&seq->tasks, task, seq_link);
2345 	}
2346 }
2347 
/* Abort a sequence: complete all of its remaining tasks and return the
 * sequence object to its pool.  Passing NULL is a no-op, so callers don't
 * need to check first. */
void
spdk_accel_sequence_abort(struct spdk_accel_sequence *seq)
{
	if (seq != NULL) {
		accel_sequence_complete_tasks(seq);
		accel_sequence_put(seq);
	}
}
2358 
/* Return the accel framework's own memory domain (g_accel_domain), used to
 * represent buffers managed by the framework. */
struct spdk_memory_domain *
spdk_accel_get_memory_domain(void)
{
	return g_accel_domain;
}
2364 
2365 static struct spdk_accel_module_if *
2366 _module_find_by_name(const char *name)
2367 {
2368 	struct spdk_accel_module_if *accel_module = NULL;
2369 
2370 	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
2371 		if (strcmp(name, accel_module->name) == 0) {
2372 			break;
2373 		}
2374 	}
2375 
2376 	return accel_module;
2377 }
2378 
2379 static inline struct spdk_accel_crypto_key *
2380 _accel_crypto_key_get(const char *name)
2381 {
2382 	struct spdk_accel_crypto_key *key;
2383 
2384 	assert(spdk_spin_held(&g_keyring_spin));
2385 
2386 	TAILQ_FOREACH(key, &g_keyring, link) {
2387 		if (strcmp(name, key->param.key_name) == 0) {
2388 			return key;
2389 		}
2390 	}
2391 
2392 	return NULL;
2393 }
2394 
2395 static void
2396 accel_crypto_key_free_mem(struct spdk_accel_crypto_key *key)
2397 {
2398 	if (key->param.hex_key) {
2399 		spdk_memset_s(key->param.hex_key, key->key_size * 2, 0, key->key_size * 2);
2400 		free(key->param.hex_key);
2401 	}
2402 	if (key->param.hex_key2) {
2403 		spdk_memset_s(key->param.hex_key2, key->key2_size * 2, 0, key->key2_size * 2);
2404 		free(key->param.hex_key2);
2405 	}
2406 	free(key->param.tweak_mode);
2407 	free(key->param.key_name);
2408 	free(key->param.cipher);
2409 	if (key->key) {
2410 		spdk_memset_s(key->key, key->key_size, 0, key->key_size);
2411 		free(key->key);
2412 	}
2413 	if (key->key2) {
2414 		spdk_memset_s(key->key2, key->key2_size, 0, key->key2_size);
2415 		free(key->key2);
2416 	}
2417 	free(key);
2418 }
2419 
2420 static void
2421 accel_crypto_key_destroy_unsafe(struct spdk_accel_crypto_key *key)
2422 {
2423 	assert(key->module_if);
2424 	assert(key->module_if->crypto_key_deinit);
2425 
2426 	key->module_if->crypto_key_deinit(key);
2427 	accel_crypto_key_free_mem(key);
2428 }
2429 
2430 /*
2431  * This function mitigates a timing side channel which could be caused by using strcmp()
2432  * Please refer to chapter "Mitigating Information Leakage Based on Variable Timing" in
2433  * the article [1] for more details
2434  * [1] https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/secure-coding/mitigate-timing-side-channel-crypto-implementation.html
2435  */
/* Constant-time comparison of two AES-XTS keys.  Every byte within the common
 * length is always visited and the differences are OR-accumulated into a
 * volatile, so execution time doesn't depend on where the keys diverge.
 * Returns true only when both lengths and all bytes match. */
static bool
accel_aes_xts_keys_equal(const char *k1, size_t k1_len, const char *k2, size_t k2_len)
{
	volatile size_t diff = k1_len ^ k2_len;
	size_t idx;

	/* Bitwise & (not &&) keeps the loop condition branch-free per operand */
	for (idx = 0; ((idx < k1_len) & (idx < k2_len)); idx++) {
		diff |= (size_t)(k1[idx] ^ k2[idx]);
	}

	return diff == 0;
}
2448 
/* Printable names for the supported crypto tweak modes, indexed by
 * enum spdk_accel_crypto_tweak_mode.  Used when parsing
 * spdk_accel_crypto_key_create() parameters and in error messages. */
static const char *g_tweak_modes[] = {
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_SIMPLE_LBA] = "SIMPLE_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_JOIN_NEG_LBA_WITH_LBA] = "JOIN_NEG_LBA_WITH_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_FULL_LBA] = "INCR_512_FULL_LBA",
	[SPDK_ACCEL_CRYPTO_TWEAK_MODE_INCR_512_UPPER_LBA] = "INCR_512_UPPER_LBA",
};

/* Printable names for the supported ciphers, indexed by enum spdk_accel_cipher. */
static const char *g_ciphers[] = {
	[SPDK_ACCEL_CIPHER_AES_CBC] = "AES_CBC",
	[SPDK_ACCEL_CIPHER_AES_XTS] = "AES_XTS",
};
2460 
/*
 * Create a crypto key from user-supplied parameters and register it in the
 * global keyring under param->key_name.
 *
 * The key is initialized and validated by the module currently assigned to the
 * ENCRYPT opcode (crypto_key_init / crypto_supports_cipher / tweak mode).
 * Returns 0 on success or a negative errno: -EINVAL (bad/missing parameters),
 * -ENOENT (no module assigned), -ENOTSUP (module lacks crypto support),
 * -ENOMEM (allocation failure), -EEXIST (duplicate key name), or whatever
 * crypto_key_init() returns.  On failure all partially-built key state is
 * zeroized and freed.
 */
int
spdk_accel_crypto_key_create(const struct spdk_accel_crypto_key_create_param *param)
{
	struct spdk_accel_module_if *module;
	struct spdk_accel_crypto_key *key;
	size_t hex_key_size, hex_key2_size;
	bool found = false;
	size_t i;
	int rc;

	if (!param || !param->hex_key || !param->cipher || !param->key_name) {
		return -EINVAL;
	}

	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
		/* hardly ever possible, but let's check and warn the user */
		SPDK_ERRLOG("Different accel modules are used for encryption and decryption\n");
	}
	module = g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module;

	if (!module) {
		SPDK_ERRLOG("No accel module found assigned for crypto operation\n");
		return -ENOENT;
	}

	if (!module->crypto_key_init || !module->crypto_supports_cipher) {
		SPDK_ERRLOG("Module %s doesn't support crypto operations\n", module->name);
		return -ENOTSUP;
	}

	key = calloc(1, sizeof(*key));
	if (!key) {
		return -ENOMEM;
	}

	key->param.key_name = strdup(param->key_name);
	if (!key->param.key_name) {
		rc = -ENOMEM;
		goto error;
	}

	/* NOTE(review): strncmp() with strlen(g_ciphers[i]) matches on prefix, so
	 * e.g. "AES_XTS_FOO" would be accepted as AES_XTS - presumably intentional
	 * leniency, but confirm before relying on it */
	for (i = 0; i < SPDK_COUNTOF(g_ciphers); ++i) {
		assert(g_ciphers[i]);

		if (strncmp(param->cipher, g_ciphers[i], strlen(g_ciphers[i])) == 0) {
			key->cipher = i;
			found = true;
			break;
		}
	}

	if (!found) {
		SPDK_ERRLOG("Failed to parse cipher\n");
		rc = -EINVAL;
		goto error;
	}

	key->param.cipher = strdup(param->cipher);
	if (!key->param.cipher) {
		rc = -ENOMEM;
		goto error;
	}

	hex_key_size = strnlen(param->hex_key, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
	if (hex_key_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
		SPDK_ERRLOG("key1 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		rc = -EINVAL;
		goto error;
	}

	if (hex_key_size == 0) {
		SPDK_ERRLOG("key1 size cannot be 0\n");
		rc = -EINVAL;
		goto error;
	}

	key->param.hex_key = strdup(param->hex_key);
	if (!key->param.hex_key) {
		rc = -ENOMEM;
		goto error;
	}

	/* Keys are supplied hex-encoded, so the raw key is half the string length */
	key->key_size = hex_key_size / 2;
	key->key = spdk_unhexlify(key->param.hex_key);
	if (!key->key) {
		SPDK_ERRLOG("Failed to unhexlify key1\n");
		rc = -EINVAL;
		goto error;
	}

	/* key2 is optional here; whether it's required depends on the cipher (XTS) */
	if (param->hex_key2) {
		hex_key2_size = strnlen(param->hex_key2, SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
		if (hex_key2_size == SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH) {
			SPDK_ERRLOG("key2 size exceeds max %d\n", SPDK_ACCEL_CRYPTO_KEY_MAX_HEX_LENGTH);
			rc = -EINVAL;
			goto error;
		}

		if (hex_key2_size == 0) {
			SPDK_ERRLOG("key2 size cannot be 0\n");
			rc = -EINVAL;
			goto error;
		}

		key->param.hex_key2 = strdup(param->hex_key2);
		if (!key->param.hex_key2) {
			rc = -ENOMEM;
			goto error;
		}

		key->key2_size = hex_key2_size / 2;
		key->key2 = spdk_unhexlify(key->param.hex_key2);
		if (!key->key2) {
			SPDK_ERRLOG("Failed to unhexlify key2\n");
			rc = -EINVAL;
			goto error;
		}
	}

	key->tweak_mode = ACCEL_CRYPTO_TWEAK_MODE_DEFAULT;
	if (param->tweak_mode) {
		found = false;

		key->param.tweak_mode = strdup(param->tweak_mode);
		if (!key->param.tweak_mode) {
			rc = -ENOMEM;
			goto error;
		}

		/* Same prefix-matching caveat as the cipher loop above */
		for (i = 0; i < SPDK_COUNTOF(g_tweak_modes); ++i) {
			assert(g_tweak_modes[i]);

			if (strncmp(param->tweak_mode, g_tweak_modes[i], strlen(g_tweak_modes[i])) == 0) {
				key->tweak_mode = i;
				found = true;
				break;
			}
		}

		if (!found) {
			SPDK_ERRLOG("Failed to parse tweak mode\n");
			rc = -EINVAL;
			goto error;
		}
	}

	/* A module without crypto_supports_tweak_mode() only handles the default mode */
	if ((!module->crypto_supports_tweak_mode && key->tweak_mode != ACCEL_CRYPTO_TWEAK_MODE_DEFAULT) ||
	    (module->crypto_supports_tweak_mode && !module->crypto_supports_tweak_mode(key->tweak_mode))) {
		SPDK_ERRLOG("Module %s doesn't support %s tweak mode\n", module->name,
			    g_tweak_modes[key->tweak_mode]);
		rc = -EINVAL;
		goto error;
	}

	if (!module->crypto_supports_cipher(key->cipher, key->key_size)) {
		SPDK_ERRLOG("Module %s doesn't support %s cipher with %zu key size\n", module->name,
			    g_ciphers[key->cipher], key->key_size);
		rc = -EINVAL;
		goto error;
	}

	if (key->cipher == SPDK_ACCEL_CIPHER_AES_XTS) {
		/* XTS requires two distinct keys of equal size */
		if (!key->key2) {
			SPDK_ERRLOG("%s key2 is missing\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}

		if (key->key_size != key->key2_size) {
			SPDK_ERRLOG("%s key size %zu is not equal to key2 size %zu\n", g_ciphers[key->cipher],
				    key->key_size,
				    key->key2_size);
			rc = -EINVAL;
			goto error;
		}

		if (accel_aes_xts_keys_equal(key->key, key->key_size, key->key2, key->key2_size)) {
			SPDK_ERRLOG("%s identical keys are not secure\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}
	}

	if (key->cipher == SPDK_ACCEL_CIPHER_AES_CBC) {
		if (key->key2_size) {
			SPDK_ERRLOG("%s doesn't use key2\n", g_ciphers[key->cipher]);
			rc = -EINVAL;
			goto error;
		}
	}

	key->module_if = module;

	/* The duplicate-name check, module init and keyring insertion must all
	 * happen under the same lock so two creators can't race */
	spdk_spin_lock(&g_keyring_spin);
	if (_accel_crypto_key_get(param->key_name)) {
		rc = -EEXIST;
	} else {
		rc = module->crypto_key_init(key);
		if (rc) {
			SPDK_ERRLOG("Module %s failed to initialize crypto key\n", module->name);
		} else {
			TAILQ_INSERT_TAIL(&g_keyring, key, link);
		}
	}
	spdk_spin_unlock(&g_keyring_spin);

	if (rc) {
		goto error;
	}

	return 0;

error:
	accel_crypto_key_free_mem(key);
	return rc;
}
2677 
2678 int
2679 spdk_accel_crypto_key_destroy(struct spdk_accel_crypto_key *key)
2680 {
2681 	if (!key || !key->module_if) {
2682 		return -EINVAL;
2683 	}
2684 
2685 	spdk_spin_lock(&g_keyring_spin);
2686 	if (!_accel_crypto_key_get(key->param.key_name)) {
2687 		spdk_spin_unlock(&g_keyring_spin);
2688 		return -ENOENT;
2689 	}
2690 	TAILQ_REMOVE(&g_keyring, key, link);
2691 	spdk_spin_unlock(&g_keyring_spin);
2692 
2693 	accel_crypto_key_destroy_unsafe(key);
2694 
2695 	return 0;
2696 }
2697 
2698 struct spdk_accel_crypto_key *
2699 spdk_accel_crypto_key_get(const char *name)
2700 {
2701 	struct spdk_accel_crypto_key *key;
2702 
2703 	spdk_spin_lock(&g_keyring_spin);
2704 	key = _accel_crypto_key_get(name);
2705 	spdk_spin_unlock(&g_keyring_spin);
2706 
2707 	return key;
2708 }
2709 
2710 /* Helper function when accel modules register with the framework. */
2711 void
2712 spdk_accel_module_list_add(struct spdk_accel_module_if *accel_module)
2713 {
2714 	struct spdk_accel_module_if *tmp;
2715 
2716 	if (_module_find_by_name(accel_module->name)) {
2717 		SPDK_NOTICELOG("Module %s already registered\n", accel_module->name);
2718 		assert(false);
2719 		return;
2720 	}
2721 
2722 	TAILQ_FOREACH(tmp, &spdk_accel_module_list, tailq) {
2723 		if (accel_module->priority < tmp->priority) {
2724 			break;
2725 		}
2726 	}
2727 
2728 	if (tmp != NULL) {
2729 		TAILQ_INSERT_BEFORE(tmp, accel_module, tailq);
2730 	} else {
2731 		TAILQ_INSERT_TAIL(&spdk_accel_module_list, accel_module, tailq);
2732 	}
2733 }
2734 
2735 /* Framework level channel create callback. */
/* Allocate the per-channel pools (tasks, sequences, aux data, buffer
 * descriptors, each sized from g_opts), acquire an IO channel from the module
 * assigned to every opcode (plus the driver, if one is loaded), and set up the
 * iobuf cache.  Returns 0 on success or -ENOMEM, releasing everything
 * acquired so far on failure. */
static int
accel_create_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel	*accel_ch = ctx_buf;
	struct spdk_accel_task *accel_task;
	struct spdk_accel_task_aux_data *accel_task_aux;
	struct spdk_accel_sequence *seq;
	struct accel_buffer *buf;
	size_t task_size_aligned;
	uint8_t *task_mem;
	uint32_t i = 0, j;
	int rc;

	/* Each task slot must be big enough for the largest module's context and
	 * cache-line aligned to avoid false sharing between slots */
	task_size_aligned = SPDK_ALIGN_CEIL(g_max_accel_module_size, SPDK_CACHE_LINE_SIZE);
	accel_ch->task_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
				   g_opts.task_count * task_size_aligned);
	if (!accel_ch->task_pool_base) {
		return -ENOMEM;
	}
	memset(accel_ch->task_pool_base, 0, g_opts.task_count * task_size_aligned);

	accel_ch->seq_pool_base = aligned_alloc(SPDK_CACHE_LINE_SIZE,
						g_opts.sequence_count * sizeof(struct spdk_accel_sequence));
	if (accel_ch->seq_pool_base == NULL) {
		goto err;
	}
	memset(accel_ch->seq_pool_base, 0, g_opts.sequence_count * sizeof(struct spdk_accel_sequence));

	accel_ch->task_aux_data_base = calloc(g_opts.task_count, sizeof(struct spdk_accel_task_aux_data));
	if (accel_ch->task_aux_data_base == NULL) {
		goto err;
	}

	accel_ch->buf_pool_base = calloc(g_opts.buf_count, sizeof(struct accel_buffer));
	if (accel_ch->buf_pool_base == NULL) {
		goto err;
	}

	STAILQ_INIT(&accel_ch->task_pool);
	SLIST_INIT(&accel_ch->task_aux_data_pool);
	SLIST_INIT(&accel_ch->seq_pool);
	SLIST_INIT(&accel_ch->buf_pool);

	/* Carve the flat allocations up into free-list entries */
	task_mem = accel_ch->task_pool_base;
	for (i = 0; i < g_opts.task_count; i++) {
		accel_task = (struct spdk_accel_task *)task_mem;
		accel_task->aux = NULL;
		STAILQ_INSERT_TAIL(&accel_ch->task_pool, accel_task, link);
		task_mem += task_size_aligned;
		accel_task_aux = &accel_ch->task_aux_data_base[i];
		SLIST_INSERT_HEAD(&accel_ch->task_aux_data_pool, accel_task_aux, link);
	}
	for (i = 0; i < g_opts.sequence_count; i++) {
		seq = &accel_ch->seq_pool_base[i];
		SLIST_INSERT_HEAD(&accel_ch->seq_pool, seq, link);
	}
	for (i = 0; i < g_opts.buf_count; i++) {
		buf = &accel_ch->buf_pool_base[i];
		SLIST_INSERT_HEAD(&accel_ch->buf_pool, buf, link);
	}

	/* Assign modules and get IO channels for each */
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		accel_ch->module_ch[i] = g_modules_opc[i].module->get_io_channel();
		/* This can happen if idxd runs out of channels. */
		if (accel_ch->module_ch[i] == NULL) {
			SPDK_ERRLOG("Module %s failed to get io channel\n", g_modules_opc[i].module->name);
			goto err;
		}
	}

	if (g_accel_driver != NULL) {
		accel_ch->driver_channel = g_accel_driver->get_io_channel();
		if (accel_ch->driver_channel == NULL) {
			SPDK_ERRLOG("Failed to get driver's IO channel\n");
			goto err;
		}
	}

	rc = spdk_iobuf_channel_init(&accel_ch->iobuf, "accel", g_opts.small_cache_size,
				     g_opts.large_cache_size);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to initialize iobuf accel channel\n");
		goto err;
	}

	return 0;
err:
	/* On the error path, i is the number of module channels acquired so far
	 * (0 for allocation failures, which happen before that loop) */
	if (accel_ch->driver_channel != NULL) {
		spdk_put_io_channel(accel_ch->driver_channel);
	}
	for (j = 0; j < i; j++) {
		spdk_put_io_channel(accel_ch->module_ch[j]);
	}
	/* free(NULL) is a no-op, so partially-completed allocation is fine here */
	free(accel_ch->task_pool_base);
	free(accel_ch->task_aux_data_base);
	free(accel_ch->seq_pool_base);
	free(accel_ch->buf_pool_base);

	return -ENOMEM;
}
2837 
2838 static void
2839 accel_add_stats(struct accel_stats *total, struct accel_stats *stats)
2840 {
2841 	int i;
2842 
2843 	total->sequence_executed += stats->sequence_executed;
2844 	total->sequence_failed += stats->sequence_failed;
2845 	total->sequence_outstanding += stats->sequence_outstanding;
2846 	total->task_outstanding += stats->task_outstanding;
2847 	total->retry.task += stats->retry.task;
2848 	total->retry.sequence += stats->retry.sequence;
2849 	total->retry.iobuf += stats->retry.iobuf;
2850 	total->retry.bufdesc += stats->retry.bufdesc;
2851 	for (i = 0; i < SPDK_ACCEL_OPC_LAST; ++i) {
2852 		total->operations[i].executed += stats->operations[i].executed;
2853 		total->operations[i].failed += stats->operations[i].failed;
2854 		total->operations[i].num_bytes += stats->operations[i].num_bytes;
2855 	}
2856 }
2857 
/* Framework level channel destroy callback.  Releases everything
 * accel_create_channel() acquired: the iobuf channel, the optional driver
 * channel, one module channel per opcode, and the task/sequence/buffer pools.
 * The channel's stats are merged into the global totals first so they survive
 * the channel's destruction. */
static void
accel_destroy_channel(void *io_device, void *ctx_buf)
{
	struct accel_io_channel	*accel_ch = ctx_buf;
	int i;

	spdk_iobuf_channel_fini(&accel_ch->iobuf);

	/* driver_channel is only present when an accel driver is configured */
	if (accel_ch->driver_channel != NULL) {
		spdk_put_io_channel(accel_ch->driver_channel);
	}

	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		assert(accel_ch->module_ch[i] != NULL);
		spdk_put_io_channel(accel_ch->module_ch[i]);
		accel_ch->module_ch[i] = NULL;
	}

	/* Update global stats to make sure channel's stats aren't lost after a channel is gone */
	spdk_spin_lock(&g_stats_lock);
	accel_add_stats(&g_stats, &accel_ch->stats);
	spdk_spin_unlock(&g_stats_lock);

	free(accel_ch->task_pool_base);
	free(accel_ch->task_aux_data_base);
	free(accel_ch->seq_pool_base);
	free(accel_ch->buf_pool_base);
}
2887 
/* Return the calling thread's accel framework I/O channel (creating it on
 * first use).  The spdk_accel_module_list address is the io_device handle
 * registered in spdk_accel_initialize(). */
struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(&spdk_accel_module_list);
}
2893 
2894 static int
2895 accel_module_initialize(void)
2896 {
2897 	struct spdk_accel_module_if *accel_module, *tmp_module;
2898 	int rc = 0, module_rc;
2899 
2900 	TAILQ_FOREACH_SAFE(accel_module, &spdk_accel_module_list, tailq, tmp_module) {
2901 		module_rc = accel_module->module_init();
2902 		if (module_rc) {
2903 			TAILQ_REMOVE(&spdk_accel_module_list, accel_module, tailq);
2904 			if (module_rc == -ENODEV) {
2905 				SPDK_NOTICELOG("No devices for module %s, skipping\n", accel_module->name);
2906 			} else if (!rc) {
2907 				SPDK_ERRLOG("Module %s initialization failed with %d\n", accel_module->name, module_rc);
2908 				rc = module_rc;
2909 			}
2910 			continue;
2911 		}
2912 
2913 		SPDK_DEBUGLOG(accel, "Module %s initialized.\n", accel_module->name);
2914 	}
2915 
2916 	return rc;
2917 }
2918 
2919 static void
2920 accel_module_init_opcode(enum spdk_accel_opcode opcode)
2921 {
2922 	struct accel_module *module = &g_modules_opc[opcode];
2923 	struct spdk_accel_module_if *module_if = module->module;
2924 
2925 	if (module_if->get_memory_domains != NULL) {
2926 		module->supports_memory_domains = module_if->get_memory_domains(NULL, 0) > 0;
2927 	}
2928 }
2929 
/* Translation callback for the accel memory domain: backs a "virtual" accel
 * buffer (buf->buf still NULL, address ACCEL_BUFFER_BASE) with a real iobuf
 * allocation and reports it through result.  Returns -ENOMEM when no iobuf
 * buffer is currently available. */
static int
accel_memory_domain_translate(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
			      struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
			      void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
{
	struct accel_buffer *buf = src_domain_ctx;

	SPDK_DEBUGLOG(accel, "translate addr %p, len %zu\n", addr, len);

	/* Only whole-buffer translations from the accel domain to the system
	 * domain of a not-yet-backed buffer are expected here. */
	assert(g_accel_domain == src_domain);
	assert(spdk_memory_domain_get_system_domain() == dst_domain);
	assert(buf->buf == NULL);
	assert(addr == ACCEL_BUFFER_BASE);
	assert(len == buf->len);

	buf->buf = spdk_iobuf_get(&buf->ch->iobuf, buf->len, NULL, NULL);
	if (spdk_unlikely(buf->buf == NULL)) {
		return -ENOMEM;
	}

	result->iov_count = 1;
	result->iov.iov_base = buf->buf;
	result->iov.iov_len = buf->len;
	SPDK_DEBUGLOG(accel, "translated addr %p\n", result->iov.iov_base);
	return 0;
}
2956 
/* Invalidation callback for the accel memory domain: the inverse of
 * accel_memory_domain_translate().  Returns the iobuf backing of the accel
 * buffer and marks the buffer as unbacked again. */
static void
accel_memory_domain_invalidate(struct spdk_memory_domain *domain, void *domain_ctx,
			       struct iovec *iov, uint32_t iovcnt)
{
	struct accel_buffer *buf = domain_ctx;

	SPDK_DEBUGLOG(accel, "invalidate addr %p, len %zu\n", iov[0].iov_base, iov[0].iov_len);

	/* Must exactly match the single iovec produced by the translate callback */
	assert(g_accel_domain == domain);
	assert(iovcnt == 1);
	assert(buf->buf != NULL);
	assert(iov[0].iov_base == buf->buf);
	assert(iov[0].iov_len == buf->len);

	spdk_iobuf_put(&buf->ch->iobuf, buf->buf, buf->len);
	buf->buf = NULL;
}
2974 
/* Initialize the accel framework: register the io_device, create the accel
 * memory domain, initialize all modules and the optional driver, assign a
 * module to every opcode (honoring user overrides), and register with iobuf.
 * Returns 0 on success or a negative errno from the first failing step.
 * NOTE(review): failure paths after spdk_io_device_register() return without
 * unwinding earlier steps — presumably cleanup happens via spdk_accel_finish();
 * confirm with callers. */
int
spdk_accel_initialize(void)
{
	enum spdk_accel_opcode op;
	struct spdk_accel_module_if *accel_module = NULL;
	int rc;

	/*
	 * We need a unique identifier for the accel framework, so use the
	 * spdk_accel_module_list address for this purpose.
	 */
	spdk_io_device_register(&spdk_accel_module_list, accel_create_channel, accel_destroy_channel,
				sizeof(struct accel_io_channel), "accel");

	spdk_spin_init(&g_keyring_spin);
	spdk_spin_init(&g_stats_lock);

	rc = spdk_memory_domain_create(&g_accel_domain, SPDK_DMA_DEVICE_TYPE_ACCEL, NULL,
				       "SPDK_ACCEL_DMA_DEVICE");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to create accel memory domain\n");
		return rc;
	}

	spdk_memory_domain_set_translation(g_accel_domain, accel_memory_domain_translate);
	spdk_memory_domain_set_invalidate(g_accel_domain, accel_memory_domain_invalidate);

	g_modules_started = true;
	rc = accel_module_initialize();
	if (rc) {
		return rc;
	}

	/* The driver (if any) is initialized after the modules */
	if (g_accel_driver != NULL && g_accel_driver->init != NULL) {
		rc = g_accel_driver->init();
		if (rc != 0) {
			SPDK_ERRLOG("Failed to initialize driver %s: %s\n", g_accel_driver->name,
				    spdk_strerror(-rc));
			return rc;
		}
	}

	/* The module list is order by priority, with the highest priority modules being at the end
	 * of the list.  The software module should be somewhere at the beginning of the list,
	 * before all HW modules.
	 * NOTE: all opcodes must be supported by software in the event that no HW modules are
	 * initialized to support the operation.
	 */
	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
			/* Later (higher-priority) modules overwrite earlier assignments */
			if (accel_module->supports_opcode(op)) {
				g_modules_opc[op].module = accel_module;
				SPDK_DEBUGLOG(accel, "OPC 0x%x now assigned to %s\n", op, accel_module->name);
			}
		}

		/* Track the largest per-task context any module requires */
		if (accel_module->get_ctx_size != NULL) {
			g_max_accel_module_size = spdk_max(g_max_accel_module_size,
							   accel_module->get_ctx_size());
		}
	}

	/* Now lets check for overrides and apply all that exist */
	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		if (g_modules_opc_override[op] != NULL) {
			accel_module = _module_find_by_name(g_modules_opc_override[op]);
			if (accel_module == NULL) {
				SPDK_ERRLOG("Invalid module name of %s\n", g_modules_opc_override[op]);
				return -EINVAL;
			}
			if (accel_module->supports_opcode(op) == false) {
				SPDK_ERRLOG("Module %s does not support op code %d\n", accel_module->name, op);
				return -EINVAL;
			}
			g_modules_opc[op].module = accel_module;
		}
	}

	/* Encrypt and decrypt must be handled by the same module (e.g. so keys
	 * created by one are usable for both directions) */
	if (g_modules_opc[SPDK_ACCEL_OPC_ENCRYPT].module != g_modules_opc[SPDK_ACCEL_OPC_DECRYPT].module) {
		SPDK_ERRLOG("Different accel modules are assigned to encrypt and decrypt operations");
		return -EINVAL;
	}

	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
		assert(g_modules_opc[op].module != NULL);
		accel_module_init_opcode(op);
	}

	rc = spdk_iobuf_register_module("accel");
	if (rc != 0) {
		SPDK_ERRLOG("Failed to register accel iobuf module\n");
		return rc;
	}

	return 0;
}
3071 
3072 static void
3073 accel_module_finish_cb(void)
3074 {
3075 	spdk_accel_fini_cb cb_fn = g_fini_cb_fn;
3076 
3077 	cb_fn(g_fini_cb_arg);
3078 	g_fini_cb_fn = NULL;
3079 	g_fini_cb_arg = NULL;
3080 }
3081 
/* Emit one "accel_assign_opc" RPC object recording that opcode opc_str was
 * overridden to be handled by module module_str.  Used when saving the
 * runtime configuration as JSON. */
static void
accel_write_overridden_opc(struct spdk_json_write_ctx *w, const char *opc_str,
			   const char *module_str)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_assign_opc");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_string(w, "opname", opc_str);
	spdk_json_write_named_string(w, "module", module_str);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}
3094 
/* Write the parameters of a crypto key as named JSON members (no enclosing
 * object).  key2 and tweak_mode are optional and only emitted when set.
 * NOTE: this dumps the actual key material (hex-encoded) into the output. */
static void
__accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_named_string(w, "name", key->param.key_name);
	spdk_json_write_named_string(w, "cipher", key->param.cipher);
	spdk_json_write_named_string(w, "key", key->param.hex_key);
	if (key->param.hex_key2) {
		spdk_json_write_named_string(w, "key2", key->param.hex_key2);
	}

	if (key->param.tweak_mode) {
		spdk_json_write_named_string(w, "tweak_mode", key->param.tweak_mode);
	}
}
3109 
/* Dump a single crypto key's parameters as a standalone JSON object. */
void
_accel_crypto_key_dump_param(struct spdk_json_write_ctx *w, struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
}
3117 
/* Emit a full "accel_crypto_key_create" RPC object for a key, so that
 * replaying the saved configuration recreates it. */
static void
_accel_crypto_key_write_config_json(struct spdk_json_write_ctx *w,
				    struct spdk_accel_crypto_key *key)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_crypto_key_create");
	spdk_json_write_named_object_begin(w, "params");
	__accel_crypto_key_dump_param(w, key);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}
3129 
/* Emit an "accel_set_options" RPC object capturing the current global
 * framework options (g_opts) for configuration save/replay. */
static void
accel_write_options(struct spdk_json_write_ctx *w)
{
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "method", "accel_set_options");
	spdk_json_write_named_object_begin(w, "params");
	spdk_json_write_named_uint32(w, "small_cache_size", g_opts.small_cache_size);
	spdk_json_write_named_uint32(w, "large_cache_size", g_opts.large_cache_size);
	spdk_json_write_named_uint32(w, "task_count", g_opts.task_count);
	spdk_json_write_named_uint32(w, "sequence_count", g_opts.sequence_count);
	spdk_json_write_named_uint32(w, "buf_count", g_opts.buf_count);
	spdk_json_write_object_end(w);
	spdk_json_write_object_end(w);
}
3144 
3145 static void
3146 _accel_crypto_keys_write_config_json(struct spdk_json_write_ctx *w, bool full_dump)
3147 {
3148 	struct spdk_accel_crypto_key *key;
3149 
3150 	spdk_spin_lock(&g_keyring_spin);
3151 	TAILQ_FOREACH(key, &g_keyring, link) {
3152 		if (full_dump) {
3153 			_accel_crypto_key_write_config_json(w, key);
3154 		} else {
3155 			_accel_crypto_key_dump_param(w, key);
3156 		}
3157 	}
3158 	spdk_spin_unlock(&g_keyring_spin);
3159 }
3160 
/* Dump all crypto keys' parameters (short form, not replayable RPCs). */
void
_accel_crypto_keys_dump_param(struct spdk_json_write_ctx *w)
{
	_accel_crypto_keys_write_config_json(w, false);
}
3166 
/* Write the complete accel subsystem configuration as a JSON array of RPC
 * objects: global options, each module's own config, opcode overrides, and
 * crypto keys. */
void
spdk_accel_write_config_json(struct spdk_json_write_ctx *w)
{
	struct spdk_accel_module_if *accel_module;
	int i;

	spdk_json_write_array_begin(w);
	accel_write_options(w);

	/* Let each module append its own RPCs, if it has any */
	TAILQ_FOREACH(accel_module, &spdk_accel_module_list, tailq) {
		if (accel_module->write_config_json) {
			accel_module->write_config_json(w);
		}
	}
	for (i = 0; i < SPDK_ACCEL_OPC_LAST; i++) {
		if (g_modules_opc_override[i]) {
			accel_write_overridden_opc(w, g_opcode_strings[i], g_modules_opc_override[i]);
		}
	}

	_accel_crypto_keys_write_config_json(w, true);

	spdk_json_write_array_end(w);
}
3191 
/* Finish accel modules one at a time.  Each call advances g_accel_module to
 * the next registered module and kicks off its module_fini (deferred via a
 * thread message); modules without module_fini are skipped by recursing.
 * When the list is exhausted, the driver and framework-global resources are
 * torn down and the user's fini callback is invoked.
 * NOTE(review): modules with a module_fini callback presumably call
 * spdk_accel_module_finish() again when done to continue the chain — confirm
 * against the module implementations. */
void
spdk_accel_module_finish(void)
{
	if (!g_accel_module) {
		/* First call: start at the head of the module list */
		g_accel_module = TAILQ_FIRST(&spdk_accel_module_list);
	} else {
		g_accel_module = TAILQ_NEXT(g_accel_module, tailq);
	}

	if (!g_accel_module) {
		/* All modules finished: tear down driver and framework state */
		if (g_accel_driver != NULL && g_accel_driver->fini != NULL) {
			g_accel_driver->fini();
		}

		spdk_spin_destroy(&g_keyring_spin);
		spdk_spin_destroy(&g_stats_lock);
		if (g_accel_domain) {
			spdk_memory_domain_destroy(g_accel_domain);
			g_accel_domain = NULL;
		}
		accel_module_finish_cb();
		return;
	}

	if (g_accel_module->module_fini) {
		/* Defer to the message loop to unwind the stack before fini runs */
		spdk_thread_send_msg(spdk_get_thread(), g_accel_module->module_fini, NULL);
	} else {
		spdk_accel_module_finish();
	}
}
3222 
3223 static void
3224 accel_io_device_unregister_cb(void *io_device)
3225 {
3226 	struct spdk_accel_crypto_key *key, *key_tmp;
3227 	enum spdk_accel_opcode op;
3228 
3229 	spdk_spin_lock(&g_keyring_spin);
3230 	TAILQ_FOREACH_SAFE(key, &g_keyring, link, key_tmp) {
3231 		accel_crypto_key_destroy_unsafe(key);
3232 	}
3233 	spdk_spin_unlock(&g_keyring_spin);
3234 
3235 	for (op = 0; op < SPDK_ACCEL_OPC_LAST; op++) {
3236 		if (g_modules_opc_override[op] != NULL) {
3237 			free(g_modules_opc_override[op]);
3238 			g_modules_opc_override[op] = NULL;
3239 		}
3240 		g_modules_opc[op].module = NULL;
3241 	}
3242 
3243 	spdk_accel_module_finish();
3244 }
3245 
/* Begin asynchronous teardown of the accel framework.  cb_fn(cb_arg) is
 * invoked once all channels are destroyed and all modules have finished
 * (see accel_io_device_unregister_cb / spdk_accel_module_finish). */
void
spdk_accel_finish(spdk_accel_fini_cb cb_fn, void *cb_arg)
{
	assert(cb_fn != NULL);

	g_fini_cb_fn = cb_fn;
	g_fini_cb_arg = cb_arg;

	spdk_io_device_unregister(&spdk_accel_module_list, accel_io_device_unregister_cb);
}
3256 
3257 static struct spdk_accel_driver *
3258 accel_find_driver(const char *name)
3259 {
3260 	struct spdk_accel_driver *driver;
3261 
3262 	TAILQ_FOREACH(driver, &g_accel_drivers, tailq) {
3263 		if (strcmp(driver->name, name) == 0) {
3264 			return driver;
3265 		}
3266 	}
3267 
3268 	return NULL;
3269 }
3270 
3271 int
3272 spdk_accel_set_driver(const char *name)
3273 {
3274 	struct spdk_accel_driver *driver;
3275 
3276 	driver = accel_find_driver(name);
3277 	if (driver == NULL) {
3278 		SPDK_ERRLOG("Couldn't find driver named '%s'\n", name);
3279 		return -ENODEV;
3280 	}
3281 
3282 	g_accel_driver = driver;
3283 
3284 	return 0;
3285 }
3286 
3287 const char *
3288 spdk_accel_get_driver_name(void)
3289 {
3290 	if (!g_accel_driver) {
3291 		return NULL;
3292 	}
3293 
3294 	return g_accel_driver->name;
3295 }
3296 
3297 void
3298 spdk_accel_driver_register(struct spdk_accel_driver *driver)
3299 {
3300 	if (accel_find_driver(driver->name)) {
3301 		SPDK_ERRLOG("Driver named '%s' has already been registered\n", driver->name);
3302 		assert(0);
3303 		return;
3304 	}
3305 
3306 	TAILQ_INSERT_TAIL(&g_accel_drivers, driver, tailq);
3307 }
3308 
3309 int
3310 spdk_accel_set_opts(const struct spdk_accel_opts *opts)
3311 {
3312 	if (!opts) {
3313 		SPDK_ERRLOG("opts cannot be NULL\n");
3314 		return -1;
3315 	}
3316 
3317 	if (!opts->opts_size) {
3318 		SPDK_ERRLOG("opts_size inside opts cannot be zero value\n");
3319 		return -1;
3320 	}
3321 
3322 	if (SPDK_GET_FIELD(opts, task_count, g_opts.task_count,
3323 			   opts->opts_size) < ACCEL_TASKS_IN_SEQUENCE_LIMIT) {
3324 		return -EINVAL;
3325 	}
3326 
3327 #define SET_FIELD(field) \
3328         if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts->opts_size) { \
3329                 g_opts.field = opts->field; \
3330         } \
3331 
3332 	SET_FIELD(small_cache_size);
3333 	SET_FIELD(large_cache_size);
3334 	SET_FIELD(task_count);
3335 	SET_FIELD(sequence_count);
3336 	SET_FIELD(buf_count);
3337 
3338 	g_opts.opts_size = opts->opts_size;
3339 
3340 #undef SET_FIELD
3341 
3342 	return 0;
3343 }
3344 
/* Copy the current global accel options into the caller's struct.  Only
 * fields that fit within opts_size are written, preserving ABI compatibility
 * with callers built against an older (smaller) struct. */
void
spdk_accel_get_opts(struct spdk_accel_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero value\n");
		return;
	}

	opts->opts_size = opts_size;

/* Copy a field only if the caller's struct is large enough to contain it */
#define SET_FIELD(field) \
	if (offsetof(struct spdk_accel_opts, field) + sizeof(opts->field) <= opts_size) { \
		opts->field = g_opts.field; \
	} \

	SET_FIELD(small_cache_size);
	SET_FIELD(large_cache_size);
	SET_FIELD(task_count);
	SET_FIELD(sequence_count);
	SET_FIELD(buf_count);

#undef SET_FIELD

	/* Do not remove this statement, you should always update this statement when you adding a new field,
	 * and do not forget to add the SET_FIELD statement for your added field. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_accel_opts) == 28, "Incorrect size");
}
3377 
/* Context carried across the for_each_channel walk in accel_get_stats():
 * accumulates stats from every channel and holds the completion callback. */
struct accel_get_stats_ctx {
	struct accel_stats	stats;		/* accumulated totals (seeded with g_stats) */
	accel_get_stats_cb	cb_fn;		/* invoked with the totals when the walk completes */
	void			*cb_arg;	/* opaque argument for cb_fn */
};
3383 
/* for_each_channel completion: hand the accumulated stats to the caller's
 * callback and release the context. */
static void
accel_get_channel_stats_done(struct spdk_io_channel_iter *iter, int status)
{
	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);

	ctx->cb_fn(&ctx->stats, ctx->cb_arg);
	free(ctx);
}
3392 
3393 static void
3394 accel_get_channel_stats(struct spdk_io_channel_iter *iter)
3395 {
3396 	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(iter);
3397 	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);
3398 	struct accel_get_stats_ctx *ctx = spdk_io_channel_iter_get_ctx(iter);
3399 
3400 	accel_add_stats(&ctx->stats, &accel_ch->stats);
3401 	spdk_for_each_channel_continue(iter, 0);
3402 }
3403 
3404 int
3405 accel_get_stats(accel_get_stats_cb cb_fn, void *cb_arg)
3406 {
3407 	struct accel_get_stats_ctx *ctx;
3408 
3409 	ctx = calloc(1, sizeof(*ctx));
3410 	if (ctx == NULL) {
3411 		return -ENOMEM;
3412 	}
3413 
3414 	spdk_spin_lock(&g_stats_lock);
3415 	accel_add_stats(&ctx->stats, &g_stats);
3416 	spdk_spin_unlock(&g_stats_lock);
3417 
3418 	ctx->cb_fn = cb_fn;
3419 	ctx->cb_arg = cb_arg;
3420 
3421 	spdk_for_each_channel(&spdk_accel_module_list, accel_get_channel_stats, ctx,
3422 			      accel_get_channel_stats_done);
3423 
3424 	return 0;
3425 }
3426 
/* Copy one opcode's statistics from a channel into the caller's struct.
 * Only fields that fit within size are written, so callers built against an
 * older (smaller) spdk_accel_opcode_stats keep working. */
void
spdk_accel_get_opcode_stats(struct spdk_io_channel *ch, enum spdk_accel_opcode opcode,
			    struct spdk_accel_opcode_stats *stats, size_t size)
{
	struct accel_io_channel *accel_ch = spdk_io_channel_get_ctx(ch);

/* True if the caller's struct is large enough to contain this field */
#define FIELD_OK(field) \
	offsetof(struct spdk_accel_opcode_stats, field) + sizeof(stats->field) <= size

#define SET_FIELD(field, value) \
	if (FIELD_OK(field)) { \
		stats->field = value; \
	}

	SET_FIELD(executed, accel_ch->stats.operations[opcode].executed);
	SET_FIELD(failed, accel_ch->stats.operations[opcode].failed);
	SET_FIELD(num_bytes, accel_ch->stats.operations[opcode].num_bytes);

#undef FIELD_OK
#undef SET_FIELD
}
3448 
3449 uint8_t
3450 spdk_accel_get_buf_align(enum spdk_accel_opcode opcode,
3451 			 const struct spdk_accel_operation_exec_ctx *ctx)
3452 {
3453 	struct spdk_accel_module_if *module = g_modules_opc[opcode].module;
3454 	struct spdk_accel_opcode_info modinfo = {}, drvinfo = {};
3455 
3456 	if (g_accel_driver != NULL && g_accel_driver->get_operation_info != NULL) {
3457 		g_accel_driver->get_operation_info(opcode, ctx, &drvinfo);
3458 	}
3459 
3460 	if (module->get_operation_info != NULL) {
3461 		module->get_operation_info(opcode, ctx, &modinfo);
3462 	}
3463 
3464 	/* If a driver is set, it'll execute most of the operations, while the rest will usually
3465 	 * fall back to accel_sw, which doesn't have any alignment requirements.  However, to be
3466 	 * extra safe, return the max(driver, module) if a driver delegates some operations to a
3467 	 * hardware module. */
3468 	return spdk_max(modinfo.required_alignment, drvinfo.required_alignment);
3469 }
3470 
3471 struct spdk_accel_module_if *
3472 spdk_accel_get_module(const char *name)
3473 {
3474 	struct spdk_accel_module_if *module;
3475 
3476 	TAILQ_FOREACH(module, &spdk_accel_module_list, tailq) {
3477 		if (strcmp(module->name, name) == 0) {
3478 			return module;
3479 		}
3480 	}
3481 
3482 	return NULL;
3483 }
3484 
3485 int
3486 spdk_accel_get_opc_memory_domains(enum spdk_accel_opcode opcode,
3487 				  struct spdk_memory_domain **domains,
3488 				  int array_size)
3489 {
3490 	assert(opcode < SPDK_ACCEL_OPC_LAST);
3491 
3492 	if (g_modules_opc[opcode].module->get_memory_domains) {
3493 		return g_modules_opc[opcode].module->get_memory_domains(domains, array_size);
3494 	}
3495 
3496 	return 0;
3497 }
3498 
3499 SPDK_LOG_REGISTER_COMPONENT(accel)
3500